From 8d8f3057997b434c73f54737d363aa1567bfddee Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Wed, 21 May 2025 18:37:13 +0200 Subject: [PATCH 1/5] HADOOP-19574 Restore Subject propagation semantics for Java 22+ --- .../hadoop/conf/ReconfigurableBase.java | 6 +- .../apache/hadoop/fs/CachingGetSpaceUsed.java | 3 +- .../hadoop/fs/DelegationTokenRenewer.java | 5 +- .../java/org/apache/hadoop/fs/FileSystem.java | 3 +- .../org/apache/hadoop/ha/HealthMonitor.java | 2 +- .../org/apache/hadoop/ha/StreamPumper.java | 3 +- .../hadoop/io/retry/AsyncCallHandler.java | 2 +- .../java/org/apache/hadoop/ipc/Client.java | 7 +- .../java/org/apache/hadoop/ipc/Server.java | 18 +-- .../metrics2/impl/MetricsSinkAdapter.java | 7 +- .../hadoop/net/unix/DomainSocketWatcher.java | 4 +- .../hadoop/security/UserGroupInformation.java | 4 +- .../AbstractDelegationTokenSecretManager.java | 6 +- .../service/launcher/InterruptEscalator.java | 3 +- .../apache/hadoop/util/AsyncDiskService.java | 3 +- .../BlockingThreadPoolExecutorService.java | 3 +- .../java/org/apache/hadoop/util/Daemon.java | 45 +++++++- .../org/apache/hadoop/util/GcTimeMonitor.java | 6 +- .../java/org/apache/hadoop/util/Shell.java | 5 +- .../hadoop/util/ShutdownHookManager.java | 5 +- .../hadoop/util/concurrent/HadoopThread.java | 105 ++++++++++++++++++ .../apache/hadoop/conf/TestConfiguration.java | 9 +- .../hadoop/conf/TestReconfiguration.java | 3 +- .../hadoop/fs/FCStatisticsBaseTest.java | 5 +- .../hadoop/fs/TestFileSystemCaching.java | 6 +- .../java/org/apache/hadoop/fs/TestTrash.java | 7 +- .../fs/loadGenerator/LoadGenerator.java | 5 +- .../org/apache/hadoop/io/TestMD5Hash.java | 9 +- .../java/org/apache/hadoop/io/TestText.java | 5 +- .../hadoop/io/nativeio/TestNativeIO.java | 6 +- .../hadoop/io/nativeio/TestNativeIoInit.java | 17 +-- .../hadoop/io/retry/TestFailoverProxy.java | 9 +- .../org/apache/hadoop/ipc/TestAsyncIPC.java | 13 ++- .../hadoop/ipc/TestCallQueueManager.java | 9 +- 
.../apache/hadoop/ipc/TestFairCallQueue.java | 5 +- .../java/org/apache/hadoop/ipc/TestIPC.java | 17 +-- .../hadoop/ipc/TestIPCServerResponder.java | 5 +- .../ipc/TestProtoBufRpcServerHandoff.java | 5 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 3 +- .../hadoop/ipc/TestRPCWaitForProxy.java | 5 +- .../hadoop/ipc/TestRpcServerHandoff.java | 5 +- .../apache/hadoop/ipc/TestSocketFactory.java | 3 +- .../hadoop/metrics2/impl/TestSinkQueue.java | 9 +- .../metrics2/lib/TestMutableMetrics.java | 7 +- .../metrics2/source/TestJvmMetrics.java | 5 +- .../hadoop/net/unix/TestDomainSocket.java | 20 ++-- .../net/unix/TestDomainSocketWatcher.java | 9 +- .../security/TestAuthorizationContext.java | 3 +- .../hadoop/security/TestGroupsCaching.java | 13 ++- .../security/TestLdapGroupsMapping.java | 6 +- .../security/TestUserGroupInformation.java | 5 +- .../hadoop/service/TestServiceLifecycle.java | 3 +- .../launcher/testservices/RunningService.java | 3 +- .../hadoop/test/MultithreadedTestUtil.java | 5 +- .../test/TestTimedOutTestsListener.java | 5 +- .../hadoop/util/TestAutoCloseableLock.java | 11 +- .../hadoop/util/TestInstrumentedLock.java | 11 +- .../util/TestInstrumentedReadWriteLock.java | 17 +-- .../apache/hadoop/util/TestPureJavaCrc32.java | 7 +- .../hadoop/util/TestReflectionUtils.java | 7 +- .../org/apache/hadoop/util/TestShell.java | 9 +- .../util/TestShutdownThreadsHelper.java | 3 +- .../crypto/key/kms/server/KMSBenchmark.java | 5 +- .../registry/server/dns/RegistryDNS.java | 3 +- .../server/services/RegistryAdminService.java | 3 +- .../org/apache/hadoop/hdfs/DataStreamer.java | 4 +- .../apache/hadoop/hdfs/DeadNodeDetector.java | 7 +- .../hadoop/hdfs/LocatedBlocksRefresher.java | 2 +- .../hdfs/util/CombinedHostsFileReader.java | 4 +- .../ha/TestRequestHedgingProxyProvider.java | 5 +- .../hdfs/util/TestByteArrayManager.java | 11 +- .../hdfs/nfs/nfs3/AsyncDataService.java | 3 +- .../hdfs/nfs/nfs3/OpenFileCtxCache.java | 2 +- .../apache/hadoop/hdfs/nfs/TestUdpServer.java | 
9 +- .../resolver/order/RouterResolver.java | 3 +- .../federation/router/ConnectionManager.java | 5 +- .../router/MountTableRefresherThread.java | 5 +- .../hdfs/server/federation/router/Router.java | 5 +- .../router/RouterHeartbeatService.java | 3 +- .../federation/router/RouterRpcServer.java | 3 +- ...RouterRefreshFairnessPolicyController.java | 7 +- .../router/TestRouterFederationRename.java | 3 +- .../TestRouterMountTableCacheRefresh.java | 2 +- .../federation/router/TestRouterRpc.java | 3 +- .../router/async/utils/SyncClass.java | 4 +- .../server/blockmanagement/BlockManager.java | 8 +- .../CacheReplicationMonitor.java | 5 +- .../blockmanagement/SlowDiskTracker.java | 3 +- .../hadoop/hdfs/server/common/Storage.java | 6 +- .../hdfs/server/datanode/BPServiceActor.java | 9 +- .../hadoop/hdfs/server/datanode/DataNode.java | 5 +- .../hdfs/server/datanode/VolumeScanner.java | 5 +- .../impl/FsDatasetAsyncDiskService.java | 3 +- .../datanode/fsdataset/impl/FsVolumeList.java | 9 +- .../impl/RamDiskAsyncLazyPersistService.java | 3 +- .../hdfs/server/namenode/Checkpointer.java | 2 +- .../hdfs/server/namenode/FSEditLogAsync.java | 3 +- .../hadoop/hdfs/server/namenode/FSImage.java | 4 +- .../namenode/FSImageFormatProtobuf.java | 5 +- .../hadoop/hdfs/server/namenode/NameNode.java | 4 +- .../server/namenode/ha/EditLogTailer.java | 5 +- .../namenode/ha/StandbyCheckpointer.java | 5 +- .../hdfs/TestAppendSnapshotTruncate.java | 3 +- ...TestClientProtocolForPipelineRecovery.java | 7 +- .../hadoop/hdfs/TestDFSClientRetries.java | 15 +-- .../hadoop/hdfs/TestDFSOutputStream.java | 5 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 5 +- .../apache/hadoop/hdfs/TestDatanodeDeath.java | 9 +- .../hadoop/hdfs/TestDeadNodeDetection.java | 3 +- .../apache/hadoop/hdfs/TestDecommission.java | 3 +- .../hdfs/TestDecommissionWithStriped.java | 7 +- .../apache/hadoop/hdfs/TestFileAppend2.java | 5 +- .../apache/hadoop/hdfs/TestFileAppend3.java | 9 +- .../apache/hadoop/hdfs/TestFileAppend4.java | 
9 +- .../hadoop/hdfs/TestFileConcurrentReader.java | 9 +- .../hadoop/hdfs/TestFileCreationClient.java | 5 +- .../hadoop/hdfs/TestMultiThreadedHflush.java | 9 +- .../hadoop/hdfs/TestParallelReadUtil.java | 5 +- .../java/org/apache/hadoop/hdfs/TestRead.java | 3 +- ...TestReplaceDatanodeFailureReplication.java | 5 +- .../hdfs/TestReplaceDatanodeOnFailure.java | 5 +- .../client/impl/TestBlockReaderFactory.java | 7 +- .../server/balancer/TestBalancerService.java | 4 +- .../blockmanagement/TestBlockManager.java | 3 +- .../server/datanode/BlockReportTestBase.java | 5 +- .../server/datanode/TestBPOfferService.java | 3 +- .../server/datanode/TestBlockRecovery.java | 5 +- .../server/datanode/TestBlockRecovery2.java | 5 +- .../datanode/TestDataNodeHotSwapVolumes.java | 7 +- .../datanode/TestDataNodeVolumeFailure.java | 3 +- .../datanode/TestDataSetLockManager.java | 3 +- .../TestDataXceiverBackwardsCompat.java | 3 +- .../datanode/TestSimulatedFSDataset.java | 5 +- .../fsdataset/impl/TestFsDatasetImpl.java | 17 +-- .../fsdataset/impl/TestFsVolumeList.java | 5 +- .../fsdataset/impl/TestLazyPersistFiles.java | 3 +- .../impl/TestReplicaCachingGetSpaceUsed.java | 5 +- .../fsdataset/impl/TestSpaceReservation.java | 2 +- .../server/diskbalancer/TestDiskBalancer.java | 5 +- .../namenode/NNThroughputBenchmark.java | 5 +- .../hdfs/server/namenode/TestAuditLogger.java | 5 +- .../hdfs/server/namenode/TestCheckpoint.java | 5 +- .../hdfs/server/namenode/TestDeleteRace.java | 15 +-- .../hdfs/server/namenode/TestEditLog.java | 3 +- .../hdfs/server/namenode/TestEditLogRace.java | 11 +- .../server/namenode/TestFSNamesystemLock.java | 15 +-- .../namenode/TestFSNamesystemMBean.java | 5 +- .../server/namenode/TestFileTruncate.java | 3 +- .../namenode/TestLargeDirectoryDelete.java | 5 +- .../server/namenode/TestListOpenFiles.java | 3 +- .../hdfs/server/namenode/TestMetaSave.java | 5 +- .../namenode/TestReencryptionHandler.java | 5 +- .../namenode/TestSecurityTokenEditLog.java | 3 +- 
.../namenode/ha/TestBootstrapStandby.java | 5 +- .../ha/TestConsistentReadsObserver.java | 11 +- .../ha/TestDelegationTokensWithHA.java | 5 +- .../server/namenode/ha/TestHASafeMode.java | 6 +- .../namenode/ha/TestRetryCacheWithHA.java | 5 +- .../namenode/ha/TestStandbyCheckpoints.java | 5 +- .../snapshot/TestOpenFilesWithSnapshot.java | 3 +- .../TestShortCircuitLocalRead.java | 7 +- .../hdfs/util/TestReferenceCountMap.java | 9 +- .../hadoop/hdfs/web/TestWebHDFSForHA.java | 5 +- .../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 5 +- .../hadoop/mapred/LocalContainerLauncher.java | 3 +- .../jobhistory/JobHistoryEventHandler.java | 3 +- .../hadoop/mapreduce/v2/app/MRAppMaster.java | 5 +- .../v2/app/TaskHeartbeatHandler.java | 3 +- .../v2/app/commit/CommitterEventHandler.java | 5 +- .../app/launcher/ContainerLauncherImpl.java | 5 +- .../mapreduce/v2/app/rm/RMCommunicator.java | 3 +- .../v2/app/rm/RMContainerAllocator.java | 7 +- .../v2/app/speculate/DefaultSpeculator.java | 3 +- .../mapreduce/v2/app/MRAppBenchmark.java | 3 +- .../local/TestLocalContainerAllocator.java | 3 +- .../apache/hadoop/mapred/LocalJobRunner.java | 5 +- .../apache/hadoop/mapred/CleanupQueue.java | 5 +- .../org/apache/hadoop/mapred/MapTask.java | 5 +- .../java/org/apache/hadoop/mapred/Task.java | 5 +- .../hadoop/mapred/pipes/Application.java | 5 +- .../hadoop/mapred/pipes/BinaryProtocol.java | 5 +- .../hadoop/mapreduce/lib/chain/Chain.java | 9 +- .../lib/map/MultithreadedMapper.java | 5 +- .../mapreduce/task/reduce/EventFetcher.java | 5 +- .../hadoop/mapreduce/task/reduce/Fetcher.java | 5 +- .../mapreduce/task/reduce/LocalFetcher.java | 2 +- .../mapreduce/task/reduce/MergeThread.java | 5 +- .../task/reduce/ShuffleSchedulerImpl.java | 5 +- .../hadoop/mapreduce/util/ProcessTree.java | 5 +- .../apache/hadoop/mapred/TestIndexCache.java | 17 +-- .../mapred/TestLocatedFileStatusFetcher.java | 5 +- .../mapred/TestTaskProgressReporter.java | 7 +- .../v2/hs/TestHistoryFileManager.java | 9 +- 
.../mapreduce/v2/hs/TestJobHistoryEvents.java | 3 +- ...tUnnecessaryBlockingOnHistoryFileInfo.java | 5 +- .../java/org/apache/hadoop/FailingMapper.java | 5 +- .../org/apache/hadoop/fs/JHLogAnalyzer.java | 5 +- .../fs/loadGenerator/LoadGeneratorMR.java | 5 +- .../apache/hadoop/mapred/ReliabilityTest.java | 13 ++- .../org/apache/hadoop/mapred/TestCollect.java | 5 +- .../mapred/jobcontrol/TestJobControl.java | 3 +- .../jobcontrol/TestLocalJobControl.java | 3 +- .../mapred/pipes/TestPipeApplication.java | 4 +- .../hadoop/mapreduce/TestLocalRunner.java | 5 +- .../jobcontrol/TestMapReduceJobControl.java | 3 +- .../TestMapReduceJobControlWithMocks.java | 3 +- .../mapreduce/v2/MiniMRYarnCluster.java | 5 +- .../nativetask/StatusReportChecker.java | 3 +- .../examples/terasort/TeraInputFormat.java | 5 +- .../fs/s3a/ITestS3AIOStatisticsContext.java | 5 +- .../fs/s3a/scale/ITestS3AConcurrentOps.java | 4 +- .../AzureFileSystemThreadPoolExecutor.java | 3 +- .../fs/azure/BlockBlobAppendStream.java | 3 +- .../hadoop/fs/azure/SelfRenewingLease.java | 4 +- .../azure/metrics/BandwidthGaugeUpdater.java | 3 +- .../fs/azurebfs/services/ListActionTaker.java | 3 +- .../services/ReadBufferManagerV1.java | 3 +- .../services/ReadBufferManagerV2.java | 3 +- .../ITestAzureConcurrentOutOfBandIo.java | 3 +- ...rationsExceptionHandlingMultiThreaded.java | 25 +++-- .../azure/ITestNativeAzureFileSystemLive.java | 7 +- .../azure/NativeAzureFileSystemBaseTest.java | 5 +- .../TestNativeAzureFileSystemConcurrency.java | 3 +- .../metrics/TestBandwidthGaugeUpdater.java | 3 +- .../ITestAzureBlobFileSystemAppend.java | 3 +- .../ITestAzureBlobFileSystemRename.java | 5 +- .../compat/common/HdfsCompatShellScope.java | 5 +- .../tools/util/TestProducerConsumer.java | 9 +- .../tools/dynamometer/ApplicationMaster.java | 3 +- .../hadoop/tools/dynamometer/Client.java | 3 +- .../tools/dynamometer/DynoInfraUtils.java | 3 +- .../dynamometer/TestDynamometerInfra.java | 3 +- .../audit/AuditReplayThread.java | 5 +- 
.../procedure/BalanceProcedureScheduler.java | 13 ++- .../apache/hadoop/mapred/gridmix/Gridmix.java | 5 +- .../hadoop/mapred/gridmix/JobMonitor.java | 5 +- .../apache/hadoop/mapred/gridmix/LoadJob.java | 9 +- .../mapred/gridmix/ReplayJobFactory.java | 5 +- .../mapred/gridmix/SerialJobFactory.java | 5 +- .../hadoop/mapred/gridmix/Statistics.java | 5 +- .../mapred/gridmix/StressJobFactory.java | 5 +- .../service/ShutdownHook.java | 5 +- .../apache/hadoop/streaming/PipeMapRed.java | 10 +- .../distributedshell/ApplicationMaster.java | 3 +- .../DistributedShellBaseTest.java | 3 +- .../distributedshell/TestDSAppMaster.java | 3 +- .../distributedshell/TestDSTimelineV20.java | 5 +- .../TestDSWithMultipleNodeManager.java | 7 +- .../UnmanagedAMLauncher.java | 9 +- .../client/SystemServiceManagerImpl.java | 3 +- .../hadoop/yarn/service/ClientAMService.java | 5 +- .../service/utils/TestServiceApiUtil.java | 5 +- .../client/api/ContainerShellWebSocket.java | 3 +- .../api/async/impl/AMRMClientAsyncImpl.java | 9 +- .../api/async/impl/NMClientAsyncImpl.java | 7 +- .../apache/hadoop/yarn/client/cli/TopCLI.java | 7 +- .../yarn/client/ProtocolHATestBase.java | 5 +- ...TestFederationRMFailoverProxyProvider.java | 3 +- .../hadoop/yarn/client/TestGetGroups.java | 5 +- ...HedgingRequestRMFailoverProxyProvider.java | 5 +- .../hadoop/yarn/client/TestRMFailover.java | 5 +- ...gerAdministrationProtocolPBClientImpl.java | 5 +- .../api/async/impl/TestNMClientAsync.java | 5 +- .../yarn/client/api/impl/TestYarnClient.java | 5 +- .../hadoop/yarn/event/AsyncDispatcher.java | 5 +- .../hadoop/yarn/event/EventDispatcher.java | 3 +- .../yarn/util/AbstractLivelinessMonitor.java | 3 +- .../TestYarnUncaughtExceptionHandler.java | 7 +- .../TestAggregatedLogFormat.java | 5 +- .../yarn/util/TestProcfsBasedProcessTree.java | 5 +- .../server/timeline/LeveldbTimelineStore.java | 5 +- .../timeline/RollingLevelDBTimelineStore.java | 5 +- .../server/AMHeartbeatRequestHandler.java | 5 +- 
.../server/uam/UnmanagedAMPoolManager.java | 3 +- .../uam/TestUnmanagedApplicationManager.java | 7 +- .../server/nodemanager/ContainerExecutor.java | 5 +- .../yarn/server/nodemanager/NodeManager.java | 9 +- .../nodemanager/NodeResourceMonitorImpl.java | 5 +- .../nodemanager/NodeStatusUpdaterImpl.java | 5 +- .../WindowsSecureContainerExecutor.java | 5 +- .../container/ContainerImpl.java | 5 +- .../CGroupElasticMemoryController.java | 5 +- .../ResourceLocalizationService.java | 13 ++- .../monitor/ContainersMonitorImpl.java | 9 +- .../TestLinuxContainerExecutor.java | 5 +- .../nodemanager/TestNodeManagerResync.java | 5 +- .../nodemanager/TestNodeStatusUpdater.java | 3 +- .../TestableFederationInterceptor.java | 4 +- .../localizer/TestContainerLocalizer.java | 9 +- .../util/TestCgroupsLCEResourcesHandler.java | 5 +- .../resourcemanager/ResourceManager.java | 5 +- .../amlauncher/ApplicationMasterLauncher.java | 5 +- .../FederationStateStoreService.java | 3 +- .../metrics/TimelineServiceV1Publisher.java | 5 +- .../monitor/SchedulingMonitor.java | 3 +- .../recovery/ZKRMStateStore.java | 5 +- .../rmapp/attempt/RMAppAttemptImpl.java | 5 +- .../scheduler/AbstractYarnScheduler.java | 5 +- .../activities/ActivitiesManager.java | 3 +- .../scheduler/capacity/CapacityScheduler.java | 9 +- .../fair/AllocationFileLoaderService.java | 3 +- .../scheduler/fair/FSPreemptionThread.java | 5 +- .../scheduler/fair/FairScheduler.java | 5 +- .../scheduler/placement/MultiNodeSorter.java | 3 +- .../security/DelegationTokenRenewer.java | 7 +- .../server/resourcemanager/ACLsTestBase.java | 5 +- .../resourcemanager/TestApplicationACLs.java | 5 +- .../resourcemanager/TestClientRMService.java | 5 +- .../TestLeaderElectorService.java | 5 +- .../yarn/server/resourcemanager/TestRMHA.java | 3 +- .../TestRMHAForAsyncScheduler.java | 18 +-- .../recovery/TestFSRMStateStore.java | 3 +- ...TestZKRMStateStoreZKClientConnections.java | 6 +- .../resourcetracker/TestNMExpiry.java | 5 +- 
.../scheduler/TestQueueMetrics.java | 5 +- .../capacity/TestCapacityScheduler.java | 9 +- .../TestCapacitySchedulerAsyncScheduling.java | 5 +- .../TestCapacitySchedulerMultiNodes.java | 6 +- ...citySchedulerMultiNodesWithPreemption.java | 5 +- .../scheduler/capacity/TestLeafQueue.java | 7 +- .../fair/FairSchedulerWithMockPreemption.java | 2 +- .../fair/TestContinuousScheduling.java | 5 +- .../TestDominantResourceFairnessPolicy.java | 5 +- .../security/TestDelegationTokenRenewer.java | 5 +- .../TestRMWebServicesSchedulerActivities.java | 5 +- .../hadoop/yarn/server/router/Router.java | 3 +- .../clientrm/TestRouterClientRMService.java | 5 +- .../rmadmin/TestRouterRMAdminService.java | 5 +- .../router/webapp/TestRouterWebServices.java | 5 +- .../cosmosdb/CosmosDBDocumentStoreReader.java | 3 +- .../cosmosdb/CosmosDBDocumentStoreWriter.java | 3 +- 331 files changed, 1215 insertions(+), 760 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 1c451ca6d30b9..5d7077668e0f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -22,6 +22,7 @@ import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,15 +106,16 @@ public Collection getChangedProperties( /** * A background thread to apply configuration changes. 
*/ - private static class ReconfigurationThread extends Thread { + private static class ReconfigurationThread extends HadoopThread { private ReconfigurableBase parent; ReconfigurationThread(ReconfigurableBase base) { + super(); this.parent = base; } // See {@link ReconfigurationServlet#applyChanges} - public void run() { + public void work() { LOG.info("Starting reconfiguration task."); final Configuration oldConf = parent.getConf(); final Configuration newConf = parent.getNewConf(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java index d7b61346d4e3b..b354314952e0f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,7 +108,7 @@ void init() { */ private void initRefreshThread(boolean runImmediately) { if (refreshInterval > 0) { - refreshUsed = new Thread(new RefreshThread(this, runImmediately), + refreshUsed = new HadoopThread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 794855508c63f..20eed047b5136 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +39,7 @@ */ @InterfaceAudience.Private public class DelegationTokenRenewer - extends Thread { + extends HadoopThread { private static final Logger LOG = LoggerFactory .getLogger(DelegationTokenRenewer.class); @@ -263,7 +264,7 @@ public void removeRenewAction( } @Override - public void run() { + public void work() { for(;;) { RenewAction action = null; try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 930abf0b5d172..8f292dbfe3819 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -81,6 +81,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.util.Preconditions; @@ -4087,7 +4088,7 @@ private interface StatisticsAggregator { static { STATS_DATA_REF_QUEUE = new ReferenceQueue<>(); // start a single daemon cleaner thread - STATS_DATA_CLEANER = new Thread(new StatisticsDataReferenceCleaner()); + STATS_DATA_CLEANER = new HadoopThread(new StatisticsDataReferenceCleaner()); STATS_DATA_CLEANER. 
setName(StatisticsDataReferenceCleaner.class.getName()); STATS_DATA_CLEANER.setDaemon(true); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java index d222d52e37349..d37c321ff4311 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java @@ -283,7 +283,7 @@ public void uncaughtException(Thread t, Throwable e) { } @Override - public void run() { + public void work() { while (shouldRun) { try { loopUntilConnected(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java index 12a24fd079e62..f117cbcfbfa20 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ha; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import java.io.BufferedReader; @@ -50,7 +51,7 @@ enum StreamType { this.stream = stream; this.type = type; - thread = new Thread(new Runnable() { + thread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java index 60210ccd920c2..010f4928be9ff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java @@ -158,7 +158,7 @@ void tryStart() { if 
(running.compareAndSet(null, current)) { final Daemon daemon = new Daemon() { @Override - public void run() { + public void work() { for (; isRunning(this);) { final long waitTime = checkCalls(); tryStop(this); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 5caf27edcdaf2..d3de874d16752 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -54,6 +54,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.Tracer; import org.slf4j.Logger; @@ -407,7 +408,7 @@ public synchronized void setRpcResponse(Writable rpcResponse) { /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. Calls are multiplexed through this * socket: responses may be delivered out of order. 
*/ - private class Connection extends Thread { + private class Connection extends HadoopThread { private InetSocketAddress server; // server ip:port private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method @@ -448,7 +449,7 @@ private class Connection extends Thread { Consumer removeMethod) { this.remoteId = remoteId; this.server = remoteId.getAddress(); - this.rpcRequestThread = new Thread(new RpcRequestSender(), + this.rpcRequestThread = new HadoopThread(new RpcRequestSender(), "IPC Parameter Sending Thread for " + remoteId); this.rpcRequestThread.setDaemon(true); @@ -1126,7 +1127,7 @@ private synchronized void sendPing() throws IOException { } @Override - public void run() { + public void work() { try { // Don't start the ipc parameter sending thread until we start this // thread, because the shutdown logic only gets triggered if this diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index ca7460a653c9a..49d316a9f4678 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -124,6 +124,8 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; + import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.SpanContext; @@ -1471,7 +1473,7 @@ public String toString() { } /** Listens on the socket. 
Creates jobs for the handler threads*/ - private class Listener extends Thread { + private class Listener extends HadoopThread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server @@ -1520,7 +1522,7 @@ void setIsAuxiliary() { this.isOnAuxiliaryPort = true; } - private class Reader extends Thread { + private class Reader extends HadoopThread { final private BlockingQueue pendingConnections; private final Selector readSelector; @@ -1533,7 +1535,7 @@ private class Reader extends Thread { } @Override - public void run() { + public void work() { LOG.info("Starting " + Thread.currentThread().getName()); try { doRunLoop(); @@ -1612,7 +1614,7 @@ void shutdown() { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); connectionManager.startIdleScan(); @@ -1760,7 +1762,7 @@ Reader getReader() { } // Sends responses of RPC back to clients. - private class Responder extends Thread { + private class Responder extends HadoopThread { private final Selector writeSelector; private int pending; // connections waiting to register @@ -1772,7 +1774,7 @@ private class Responder extends Thread { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); try { @@ -3219,7 +3221,7 @@ private void internalQueueCall(Call call, boolean blocking) } /** Handles queued calls . 
*/ - private class Handler extends Thread { + private class Handler extends HadoopThread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + @@ -3227,7 +3229,7 @@ public Handler(int instanceNumber) { } @Override - public void run() { + public void work() { LOG.debug("{}: starting", Thread.currentThread().getName()); SERVER.set(Server.this); while (running) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index c8843f2812e57..60ebc96d1e9f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final MetricsSink sink; private final MetricsFilter sourceFilter, recordFilter, metricFilter; private final SinkQueue queue; - private final Thread sinkThread; + private final HadoopThread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; private final int periodMs, firstRetryDelay, retryCount; @@ -84,8 +85,8 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { "Dropped updates per sink", 0); qsize = registry.newGauge("Sink_"+ name + "Qsize", "Queue size", 0); - sinkThread = new Thread() { - @Override public void run() { + sinkThread = new HadoopThread() { + @Override public void work() { publishMetricsFromQueue(); } }; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index 5c8a3357a3ee6..f867370add712 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -440,7 +440,7 @@ private void sendCallbackAndRemove(String caller, } @VisibleForTesting - final Thread watcherThread = new Thread(new Runnable() { + final Thread watcherThread = new HadoopThread(new Runnable() { @Override public void run() { if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index b6be569026fd7..652d5001ab4e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -89,7 +89,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -930,7 +930,7 @@ private void executeAutoRenewalTask(final String userName, new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread 
t = new Thread(r); + Thread t = new HadoopThread(r); t.setDaemon(true); t.setName("TGT Renewer for " + userName); return t; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 9cf3ccdd445e7..960f6aaf12f40 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.functional.InvocationRaisingIOE; import org.slf4j.Logger; @@ -912,12 +912,12 @@ public boolean isRunning() { return running; } - private class ExpiredTokenRemover extends Thread { + private class ExpiredTokenRemover extends HadoopThread { private long lastMasterKeyUpdate; private long lastTokenCacheCleanup; @Override - public void run() { + public void work() { LOG.info("Starting expired delegation token remover thread, " + "tokenRemoverScanInterval=" + tokenRemoverScanInterval / (60 * 1000) + " min(s)"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index 4d43c3a106f5e..b9f11152203be 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,7 +117,7 @@ public void interrupted(IrqHandler.InterruptData interruptData) { //start an async shutdown thread with a timeout ServiceForcedShutdown shutdown = new ServiceForcedShutdown(service, shutdownTimeMillis); - Thread thread = new Thread(shutdown); + Thread thread = new HadoopThread(shutdown); thread.setDaemon(true); thread.setName("Service Forced Shutdown"); thread.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java index a3bf4faf0a980..5b877618ef5e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +75,7 @@ public AsyncDiskService(String[] volumes) { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new Thread(threadGroup, r); + return new HadoopThread(threadGroup, r); } }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index 5c90e4bd2d601..e1d01f82c919b 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -29,6 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * This ExecutorService blocks the submission of new tasks when its queue is @@ -71,7 +72,7 @@ static ThreadFactory getNamedThreadFactory(final String prefix) { public Thread newThread(Runnable r) { final String name = prefix + "-pool" + poolNum + "-t" + threadNumber.getAndIncrement(); - return new Thread(group, r, name); + return new HadoopThread(group, r, name); } }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java index f735b82e4289b..eb789755eea22 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java @@ -18,16 +18,59 @@ package org.apache.hadoop.util; +import java.security.PrivilegedAction; import java.util.concurrent.ThreadFactory; +import javax.security.auth.Subject; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.security.authentication.util.SubjectUtil; -/** A thread that has called {@link Thread#setDaemon(boolean) } with true.*/ +/** A thread that has called {@link Thread#setDaemon(boolean) } with true. + * + * The runnable code must either be specified in the runnable parameter or + * in the override work() method. + * + * The subject propagation is already added in either case. 
+ * + * */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public class Daemon extends Thread { + Subject startSubject; + + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + + /** + * Override this instead of run() + */ + public void work() { + throw new IllegalArgumentException(""); + } + + @Override + public final void run() { + SubjectUtil.doAs(startSubject, new PrivilegedAction() { + + @Override + public Void run() { + if (runnable != null) { + runnable.run(); + } else { + work(); + } + return null; + } + + }); + } + { setDaemon(true); // always a daemon } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 95d0d4d290ccd..a8aa6079c557f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -23,13 +23,15 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.HadoopThread; + /** * This class monitors the percentage of time the JVM is paused in GC within * the specified observation window, say 1 minute. The user can provide a * hook which will be called whenever this percentage exceeds the specified * threshold. 
*/ -public class GcTimeMonitor extends Thread { +public class GcTimeMonitor extends HadoopThread { private final long maxGcTimePercentage; private final long observationWindowMs, sleepIntervalMs; @@ -151,7 +153,7 @@ public GcTimeMonitor(long observationWindowMs, long sleepIntervalMs, } @Override - public void run() { + public void work() { startTime = System.currentTimeMillis(); curData.timestamp = startTime; gcDataBuf[startIdx].setValues(startTime, 0); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index c53ddc0725ca2..d025af725a955 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -36,6 +36,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; @@ -1020,9 +1021,9 @@ private void runCommand() throws IOException { // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new HadoopThread() { @Override - public void run() { + public void work() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java index e85f850514b16..de521001d2a79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,9 +85,9 @@ public final class ShutdownHookManager { static { try { Runtime.getRuntime().addShutdownHook( - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { if (MGR.shutdownInProgress.getAndSet(true)) { LOG.info("Shutdown process invoked a second time: ignoring"); return; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java new file mode 100644 index 0000000000000..f68c3f6f4fff4 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.util.concurrent; + +import java.security.PrivilegedAction; +import javax.security.auth.Subject; + +import org.apache.hadoop.security.authentication.util.SubjectUtil; + +/** + * Helper class to restore Subject propagation behavior after the JEP411/JEP486 + * changes. + * + * Runnables can be specified normally, but the work() method has to be + * overridden instead of run() when subclassing. + */ +public class HadoopThread extends Thread { + + Subject startSubject; + Runnable hadoopTarget; + + public HadoopThread() { + super(); + } + + public HadoopThread(Runnable target) { + super(); + this.hadoopTarget = target; + } + + public HadoopThread(ThreadGroup group, Runnable target) { + // The target passed to Thread has no effect, we only pass it + // because there is no super(group) constructor. + super(group, target); + this.hadoopTarget = target; + } + + public HadoopThread(Runnable target, String name) { + super(name); + this.hadoopTarget = target; + } + + public HadoopThread(String name) { + super(name); + } + + public HadoopThread(ThreadGroup group, String name) { + super(group, name); + } + + public HadoopThread(ThreadGroup group, Runnable target, String name) { + super(group, name); + this.hadoopTarget = target; + } + + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + + /** + * Override this instead of run(). + * + * It is really unfortunate that we have to introduce a new method and cannot reuse run(), + * but since run() is designed to be overridden, I couldn't find any other way to make this work.
+ * + */ + public void work() { + throw new IllegalArgumentException("No Runnable was specified and work() is not overriden"); + } + + @Override + public final void run() { + SubjectUtil.doAs(startSubject, new PrivilegedAction() { + + @Override + public Void run() { + if (hadoopTarget != null) { + hadoopTarget.run(); + } else { + work(); + } + return null; + } + + }); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index fa0301b251765..408b4ad66c3ea 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -81,6 +81,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; @@ -2478,7 +2479,7 @@ public void testConcurrentAccesses() throws Exception { Configuration conf = new Configuration(); conf.addResource(fileResource); - class ConfigModifyThread extends Thread { + class ConfigModifyThread extends HadoopThread { final private Configuration config; final private String prefix; @@ -2488,7 +2489,7 @@ public ConfigModifyThread(Configuration conf, String prefix) { } @Override - public void run() { + public void work() { for (int i = 0; i < 10000; i++) { config.set("some.config.value-" + prefix + i, "value"); } @@ -2746,7 +2747,7 @@ private static Configuration checkCDATA(byte[] bytes) { @Test public void testConcurrentModificationDuringIteration() throws InterruptedException { Configuration configuration = new Configuration(); - new Thread(() -> { + new HadoopThread(() -> { while (true) { 
configuration.set(String.valueOf(Math.random()), String.valueOf(Math.random())); } @@ -2754,7 +2755,7 @@ public void testConcurrentModificationDuringIteration() throws InterruptedExcept AtomicBoolean exceptionOccurred = new AtomicBoolean(false); - new Thread(() -> { + new HadoopThread(() -> { while (true) { try { configuration.iterator(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index c475f7c826bb3..d3c3b88afa802 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -22,6 +22,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -284,7 +285,7 @@ public void testReconfigure() { public void testThread() throws ReconfigurationException { ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); assertTrue(dummy.getConf().get(PROP1).equals(VAL1)); - Thread dummyThread = new Thread(dummy); + Thread dummyThread = new HadoopThread(dummy); dummyThread.start(); try { Thread.sleep(500); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index 80edbeab4c8f8..fae068aff9c9a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -40,6 +40,7 @@ import 
java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,9 +74,9 @@ public void testStatisticsOperations() throws Exception { stats.incrementWriteOps(123); assertEquals(123, stats.getWriteOps()); - Thread thread = new Thread() { + HadoopThread thread = new HadoopThread() { @Override - public void run() { + public void work() { stats.incrementWriteOps(1); } }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 119bad41a3028..9819c13ac185b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -36,7 +36,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT; @@ -125,9 +125,9 @@ public void initialize(URI uri, Configuration conf) throws IOException { @Test public void testCacheEnabledWithInitializeForeverFS() throws Exception { final Configuration conf = new Configuration(); - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { conf.set("fs.localfs1.impl", "org.apache.hadoop.fs." 
+ "TestFileSystemCaching$InitializeForeverFileSystem"); try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 89d7419f763d2..27bcc0c108485 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * This class tests commands from Trash. @@ -724,7 +725,7 @@ public void testTrashEmptier() throws Exception { // Start Emptier in background Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new HadoopThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -792,7 +793,7 @@ public void testTrashEmptierCleanDirNotInCheckpointDir() throws Exception { // Start Emptier in background. 
Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new HadoopThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -1049,7 +1050,7 @@ private void verifyAuditableTrashEmptier(Trash trash, Thread emptierThread = null; try { Runnable emptier = trash.getEmptier(); - emptierThread = new Thread(emptier); + emptierThread = new HadoopThread(emptier); emptierThread.start(); // Shutdown the emptier thread after a given time diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index d52abbc2a99bd..c79ac1da6b77d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -46,6 +46,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -215,7 +216,7 @@ public LoadGenerator(Configuration conf) throws IOException, UnknownHostExceptio * A thread runs for the specified elapsed time if the time isn't zero. * Otherwise, it runs forever. */ - private class DFSClientThread extends Thread { + private class DFSClientThread extends HadoopThread { private int id; private long [] executionTime = new long[TOTAL_OP_TYPES]; private long [] totalNumOfOps = new long[TOTAL_OP_TYPES]; @@ -230,7 +231,7 @@ private DFSClientThread(int id) { * Each iteration decides what's the next operation and then pauses. 
*/ @Override - public void run() { + public void work() { try { while (shouldRun) { nextOp(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java index 99a8fc71898df..a356b5e0fdf2b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java @@ -18,6 +18,7 @@ package org.apache.hadoop.io; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -93,9 +94,9 @@ public void testMD5Hash() throws Exception { assertTrue(closeHash1.hashCode() != closeHash2.hashCode(), "hash collision"); - Thread t1 = new Thread() { + HadoopThread t1 = new HadoopThread() { @Override - public void run() { + public void work() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(DFF); assertEquals(hash, md5HashFF); @@ -103,9 +104,9 @@ public void run() { } }; - Thread t2 = new Thread() { + HadoopThread t2 = new HadoopThread() { @Override - public void run() { + public void work() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(D00); assertEquals(hash, md5Hash00); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 95c4abe5e3907..04579099b3acb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -27,6 +27,7 @@ import org.apache.hadoop.constants.ConfigConstants; import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; @@ -300,13 +301,13 @@ public void testTextText() throws CharacterCodingException { assertEquals(8, a.copyBytes().length); } - private class ConcurrentEncodeDecodeThread extends Thread { + private class ConcurrentEncodeDecodeThread extends HadoopThread { public ConcurrentEncodeDecodeThread(String name) { super(name); } @Override - public void run() { + public void work() { final String name = this.getName(); DataOutputBuffer out = new DataOutputBuffer(); DataInputBuffer in = new DataInputBuffer(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 7d4f24efd52ce..8f96f338da973 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -56,6 +56,8 @@ import org.apache.hadoop.test.StatUtils; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; + import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; @@ -135,9 +137,9 @@ public void testMultiThreadedFstat() throws Exception { new AtomicReference(); List statters = new ArrayList(); for (int i = 0; i < 10; i++) { - Thread statter = new Thread() { + HadoopThread statter = new HadoopThread() { @Override - public void run() { + public void work() { long et = Time.now() + 5000; while (Time.now() < et) { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java index d44727b4b65b6..140a850bc2ac6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -44,15 +45,15 @@ public class TestNativeIoInit { @Test @Timeout(value = 10) public void testDeadlockLinux() throws Exception { - Thread one = new Thread() { + Thread one = new HadoopThread() { @Override - public void run() { + public void work() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + Thread two = new HadoopThread() { @Override - public void run() { + public void work() { NativeIO.POSIX.isAvailable(); } }; @@ -66,15 +67,15 @@ public void run() { @Timeout(value = 10) public void testDeadlockWindows() throws Exception { assumeTrue(Path.WINDOWS, "Expected windows"); - Thread one = new Thread() { + HadoopThread one = new HadoopThread() { @Override - public void run() { + public void work() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + HadoopThread two = new HadoopThread() { @Override - public void run() { + public void work() { try { NativeIO.Windows.extendWorkingSetSize(100); } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java index a541ea99fcfdf..54260514f7c84 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java @@ 
-28,6 +28,7 @@ import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; public class TestFailoverProxy { @@ -252,7 +253,7 @@ public String failsIfIdentifierDoesntMatch(String identifier) } - private static class ConcurrentMethodThread extends Thread { + private static class ConcurrentMethodThread extends HadoopThread { private UnreliableInterface unreliable; public String result; @@ -262,7 +263,7 @@ public ConcurrentMethodThread(UnreliableInterface unreliable) { } @Override - public void run() { + public void work() { try { result = unreliable.failsIfIdentifierDoesntMatch("impl2"); } catch (Exception e) { @@ -327,9 +328,9 @@ public void testFailoverBetweenMultipleStandbys() RetryPolicies.failoverOnNetworkException( RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000)); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep); impl1.setIdentifier("renamed-impl1"); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java index 1e0afe587ca96..932af5d3dd379 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java @@ -30,6 +30,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGetFuture; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -73,7 +74,7 @@ public void setupConf() { Client.setAsynchronousMode(true); } 
- static class AsyncCaller extends Thread { + static class AsyncCaller extends HadoopThread { private Client client; private InetSocketAddress server; private int count; @@ -96,7 +97,7 @@ static class AsyncCaller extends Thread { } @Override - public void run() { + public void work() { // In case Thread#Start is called, which will spawn new thread. Client.setAsynchronousMode(true); for (int i = 0; i < count; i++) { @@ -154,7 +155,7 @@ void assertReturnValues(long timeout, TimeUnit unit) * For testing the asynchronous calls of the RPC client * implemented with CompletableFuture. */ - static class AsyncCompletableFutureCaller extends Thread { + static class AsyncCompletableFutureCaller extends HadoopThread { private final Client client; private final InetSocketAddress server; private final int count; @@ -171,7 +172,7 @@ static class AsyncCompletableFutureCaller extends Thread { } @Override - public void run() { + public void work() { // Set the RPC client to use asynchronous mode. Client.setAsynchronousMode(true); long startTime = Time.monotonicNow(); @@ -204,7 +205,7 @@ public void assertReturnValues() } } - static class AsyncLimitlCaller extends Thread { + static class AsyncLimitlCaller extends HadoopThread { private Client client; private InetSocketAddress server; private int count; @@ -242,7 +243,7 @@ public AsyncLimitlCaller(int callerId, Client client, InetSocketAddress server, } @Override - public void run() { + public void work() { // in case Thread#Start is called, which will spawn new thread Client.setAsynchronousMode(true); for (int i = 0; i < count; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java index bc607d762a3cd..57bdd2c7047b5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -151,7 +152,7 @@ public void assertCanTake(CallQueueManager cq, int numberOfTakes, int takeAttempts) throws InterruptedException { Taker taker = new Taker(cq, takeAttempts, -1); - Thread t = new Thread(taker); + Thread t = new HadoopThread(taker); t.start(); t.join(100); @@ -164,7 +165,7 @@ public void assertCanPut(CallQueueManager cq, int numberOfPuts, int putAttempts) throws InterruptedException { Putter putter = new Putter(cq, putAttempts, -1); - Thread t = new Thread(putter); + Thread t = new HadoopThread(putter); t.start(); t.join(100); @@ -277,7 +278,7 @@ public void testSwapUnderContention() throws InterruptedException { // Create putters and takers for (int i=0; i < 1000; i++) { Putter p = new Putter(manager, -1, -1); - Thread pt = new Thread(p); + Thread pt = new HadoopThread(p); producers.add(p); threads.put(p, pt); @@ -286,7 +287,7 @@ public void testSwapUnderContention() throws InterruptedException { for (int i=0; i < 100; i++) { Taker t = new Taker(manager, -1, -1); - Thread tt = new Thread(t); + Thread tt = new HadoopThread(t); consumers.add(t); threads.put(t, tt); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java index 1afc88c562c8e..eb4c496e7c786 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java @@ -50,6 +50,7 @@ 
import java.util.List; import java.util.concurrent.BlockingQueue; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; @@ -684,7 +685,7 @@ public void assertCanTake(BlockingQueue cq, int numberOfTakes, CountDownLatch latch = new CountDownLatch(numberOfTakes); Taker taker = new Taker(cq, takeAttempts, "default", latch); - Thread t = new Thread(taker); + Thread t = new HadoopThread(taker); t.start(); latch.await(); @@ -698,7 +699,7 @@ public void assertCanPut(BlockingQueue cq, int numberOfPuts, CountDownLatch latch = new CountDownLatch(numberOfPuts); Putter putter = new Putter(cq, putAttempts, null, latch); - Thread t = new Thread(putter); + Thread t = new HadoopThread(putter); t.start(); latch.await(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index a191095b44516..d1882b203b045 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -103,6 +103,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -252,7 +253,7 @@ public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, } } - private static class SerialCaller extends Thread { + private static class SerialCaller extends HadoopThread { private Client client; private InetSocketAddress server; private int count; @@ -265,7 +266,7 @@ public SerialCaller(Client client, 
InetSocketAddress server, int count) { } @Override - public void run() { + public void work() { for (int i = 0; i < count; i++) { try { final long param = RANDOM.nextLong(); @@ -996,7 +997,7 @@ private void checkBlocking(int readers, int readerQ, int callQ) throws Exception // instantiate the threads, will start in batches Thread[] threads = new Thread[clients]; for (int i=0; i future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new HadoopThread(future); clientThread.start(); server.awaitInvocation(); @@ -146,7 +147,7 @@ public void testDeferredException() throws IOException, InterruptedException, new ClientCallable(serverAddress, conf, requestBytes); FutureTask future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new HadoopThread(future); clientThread.start(); server.awaitInvocation(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java index 51c66abb3fc26..74529bd89fe61 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java @@ -38,6 +38,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.SocksSocketFactory; import org.apache.hadoop.net.StandardSocketFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -63,7 +64,7 @@ public class TestSocketFactory { private void startTestServer() throws Exception { // start simple tcp server. 
serverRunnable = new ServerRunnable(); - serverThread = new Thread(serverRunnable); + serverThread = new HadoopThread(serverRunnable); serverThread.start(); final long timeout = System.currentTimeMillis() + START_STOP_TIMEOUT_SEC * 1000; while (!serverRunnable.isReady()) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java index 33f9946e94d9e..9efa9ff73c7f7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java @@ -21,6 +21,7 @@ import java.util.ConcurrentModificationException; import java.util.concurrent.CountDownLatch; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,8 +80,8 @@ private void testEmptyBlocking(int awhile) throws Exception { final SinkQueue q = new SinkQueue(2); final Runnable trigger = mock(Runnable.class); // try consuming emtpy equeue and blocking - Thread t = new Thread() { - @Override public void run() { + HadoopThread t = new HadoopThread() { + @Override public void work() { try { assertEquals(1, (int) q.dequeue(), "element"); q.consume(new Consumer() { @@ -255,8 +256,8 @@ private SinkQueue newSleepingConsumerQueue(int capacity, q.enqueue(i); } final CountDownLatch barrier = new CountDownLatch(1); - Thread t = new Thread() { - @Override public void run() { + HadoopThread t = new HadoopThread() { + @Override public void work() { try { Thread.sleep(10); // causes failure without barrier q.consume(new Consumer() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java index f423b57d1c3e2..d1bb32d64f15a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java @@ -40,6 +40,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.Quantile; import org.apache.hadoop.thirdparty.com.google.common.math.Stats; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -209,7 +210,7 @@ interface TestProtocol { rates.add("metric" + i, 0); } - Thread[] threads = new Thread[n]; + HadoopThread[] threads = new HadoopThread[n]; final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length); final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1); final CountDownLatch secondAddsFinished = @@ -220,9 +221,9 @@ interface TestProtocol { final Random sleepRandom = new Random(seed); for (int tIdx = 0; tIdx < threads.length; tIdx++) { final int threadIdx = tIdx; - threads[threadIdx] = new Thread() { + threads[threadIdx] = new HadoopThread() { @Override - public void run() { + public void work() { try { for (int i = 0; i < 1000; i++) { rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 
1 : 2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index 2110f33981dde..5872cf004fe71 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JvmPauseMonitor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Timeout; import java.util.ArrayList; @@ -296,11 +297,11 @@ private static void updateThreadsAndWait(List threads, } } - static class TestThread extends Thread { + static class TestThread extends HadoopThread { private volatile boolean exit = false; private boolean exited = false; @Override - public void run() { + public void work() { while (!exit) { try { Thread.sleep(1000); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index a1704a0ec3013..fb917b822e375 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -47,7 +47,7 @@ import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -457,8 +457,8 @@ void testClientServer1(final Class 
writeStrategyClass, new ArrayBlockingQueue(2); final DomainSocket serv = (preConnectedSockets != null) ? null : DomainSocket.bindAndListen(TEST_PATH); - Thread serverThread = new Thread() { - public void run(){ + Thread serverThread = new HadoopThread() { + public void work(){ // Run server DomainSocket conn = null; try { @@ -485,8 +485,8 @@ public void run(){ }; serverThread.start(); - Thread clientThread = new Thread() { - public void run(){ + HadoopThread clientThread = new HadoopThread() { + public void work(){ try { DomainSocket client = preConnectedSockets != null ? preConnectedSockets[1] : DomainSocket.connect(TEST_PATH); @@ -626,8 +626,8 @@ public void testFdPassing() throws Exception { for (int i = 0; i < passedFiles.length; i++) { passedFds[i] = passedFiles[i].getInputStream().getFD(); } - Thread serverThread = new Thread() { - public void run(){ + Thread serverThread = new HadoopThread() { + public void work(){ // Run server DomainSocket conn = null; try { @@ -649,8 +649,8 @@ public void run(){ }; serverThread.start(); - Thread clientThread = new Thread() { - public void run(){ + Thread clientThread = new HadoopThread() { + public void work(){ try { DomainSocket client = DomainSocket.connect(TEST_PATH); OutputStream clientOutputStream = client.getOutputStream(); @@ -783,7 +783,7 @@ public void run() { } } }; - Thread readerThread = new Thread(reader); + Thread readerThread = new HadoopThread(reader); readerThread.start(); socks[0].getOutputStream().write(1); socks[0].getOutputStream().write(2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java index f78005a6ed3f2..6d7a5a163fc49 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java @@ -32,6 +32,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -128,7 +129,7 @@ public void testStress() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -155,7 +156,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new HadoopThread(new Runnable() { @Override public void run() { final Random random = new Random(); @@ -199,7 +200,7 @@ public void testStressInterruption() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -227,7 +228,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new HadoopThread(new Runnable() { @Override public void run() { final Random random = new Random(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java index fe6bc4f58de93..69f5ba65af560 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.security; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -42,7 +43,7 @@ public void testClearAuthorizationHeader() { public void testThreadLocalIsolation() throws Exception { byte[] mainHeader = "main-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(mainHeader); - Thread t = new Thread(() -> { + HadoopThread t = new HadoopThread(() -> { Assertions.assertNull(AuthorizationContext.getCurrentAuthorizationHeader()); byte[] threadHeader = "other-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(threadHeader); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 5a2927f71c18e..cb2687db17f3d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -406,10 +407,10 @@ public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception { FakeGroupMapping.clearBlackList(); FakeGroupMapping.setGetGroupsDelayMs(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new Thread() { - public void run() { + threads.add(new HadoopThread() { + public void work() { try { assertEquals(2, groups.getGroups("me").size()); } catch (IOException e) { @@ -451,10 +452,10 @@ 
public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception { timer.advance(400 * 1000); Thread.sleep(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new Thread() { - public void run() { + threads.add(new HadoopThread() { + public void work() { try { assertEquals(2, groups.getGroups("me").size()); } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java index 1bb43ffcd5eb5..464a1ae4878bd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java @@ -59,7 +59,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.JavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -414,7 +414,7 @@ public void testLdapConnectionTimeout() // Below we create a LDAP server which will accept a client request; // but it will never reply to the bind (connect) request. // Client of this LDAP server is expected to get a connection timeout. - final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -469,7 +469,7 @@ public void testLdapReadTimeout() throws IOException, InterruptedException { // authenticate it successfully; but it will never reply to the following // query request. // Client of this LDAP server is expected to get a read timeout. 
- final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 2fb5b6c22eb71..125c9e8b1dc80 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -33,6 +33,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -1023,12 +1024,12 @@ public Void run() throws Exception { }}); } - static class GetTokenThread extends Thread { + static class GetTokenThread extends HadoopThread { boolean runThread = true; volatile ConcurrentModificationException cme = null; @Override - public void run() { + public void work() { while(runThread) { try { UserGroupInformation.getCurrentUser().getCredentials(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java index ccbc0a009fbf5..0121d44678e6e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java @@ -25,6 +25,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceStateChangeListener; import org.apache.hadoop.service.ServiceStateException; 
+import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -404,7 +405,7 @@ private AsyncSelfTerminatingService(int timeout) { @Override protected void serviceStart() throws Exception { - new Thread(this).start(); + new HadoopThread(this).start(); super.serviceStart(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java index 3093aa1ff5d58..6d508c668ca0b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +59,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - Thread thread = new Thread(this); + Thread thread = new HadoopThread(this); thread.setName(getName()); thread.start(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java index e270ee68000eb..fb2910c1813d0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java @@ -21,6 +21,7 @@ import java.util.Set; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; 
import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -175,7 +176,7 @@ public Iterable getTestThreads() { * A thread that can be added to a test context, and properly * passes exceptions through. */ - public static abstract class TestingThread extends Thread { + public static abstract class TestingThread extends HadoopThread { protected final TestContext ctx; protected boolean stopped; @@ -184,7 +185,7 @@ public TestingThread(TestContext ctx) { } @Override - public void run() { + public void work() { try { doWork(); } catch (Throwable t) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java index 6805dcd2fd4b3..ae3a2ec46c152 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java @@ -23,6 +23,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -59,7 +60,7 @@ public Deadlock() { } } - class DeadlockThread extends Thread { + class DeadlockThread extends HadoopThread { private Lock lock1 = null; private Lock lock2 = null; @@ -84,7 +85,7 @@ class DeadlockThread extends Thread { this.useSync = false; } - public void run() { + public void work() { if (useSync) { syncLock(); } else { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java index 90beb58aee449..eebb8c334798d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java @@ -19,6 +19,9 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.hadoop.util.concurrent.HadoopThread; + import static org.junit.jupiter.api.Assertions.assertEquals; import org.junit.jupiter.api.Test; @@ -54,9 +57,9 @@ public void testMultipleThread() throws Exception { AutoCloseableLock lock = new AutoCloseableLock(); lock.acquire(); assertTrue(lock.isLocked()); - Thread competingThread = new Thread() { + HadoopThread competingThread = new HadoopThread() { @Override - public void run() { + public void work() { assertTrue(lock.isLocked()); assertFalse(lock.tryLock()); } @@ -79,9 +82,9 @@ public void testTryWithResourceSyntax() throws Exception { try(AutoCloseableLock localLock = lock.acquire()) { assertEquals(localLock, lock); assertTrue(lock.isLocked()); - Thread competingThread = new Thread() { + HadoopThread competingThread = new HadoopThread() { @Override - public void run() { + public void work() { assertTrue(lock.isLocked()); assertFalse(lock.tryLock()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java index fb9e773c7d06e..851776c770795 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -52,9 +53,9 @@ public void testMultipleThread(TestInfo 
testInfo) throws Exception { InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300); lock.lock(); try { - Thread competingThread = new Thread() { + HadoopThread competingThread = new HadoopThread() { @Override - public void run() { + public void work() { assertFalse(lock.tryLock()); } }; @@ -89,9 +90,9 @@ public void unlock() { AutoCloseableLock acl = new AutoCloseableLock(lock); try (AutoCloseable localLock = acl.acquire()) { assertEquals(acl, localLock); - Thread competingThread = new Thread() { + HadoopThread competingThread = new HadoopThread() { @Override - public void run() { + public void work() { assertNotEquals(Thread.currentThread(), lockThread.get()); assertFalse(lock.tryLock()); } @@ -253,7 +254,7 @@ void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) { private Thread lockUnlockThread(Lock lock) throws InterruptedException { CountDownLatch countDownLatch = new CountDownLatch(1); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { assertFalse(lock.tryLock()); countDownLatch.countDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java index 6bb5d08e154df..085ac661e4b36 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; @@ -68,17 +69,17 @@ public void release() { final AutoCloseableLock readLock = new AutoCloseableLock( readWriteLock.readLock()); try 
(AutoCloseableLock lock = writeLock.acquire()) { - Thread competingWriteThread = new Thread() { + Thread competingWriteThread = new HadoopThread() { @Override - public void run() { + public void work() { assertFalse(writeLock.tryLock()); } }; competingWriteThread.start(); competingWriteThread.join(); - Thread competingReadThread = new Thread() { + Thread competingReadThread = new HadoopThread() { @Override - public void run() { + public void work() { assertFalse(readLock.tryLock()); }; }; @@ -104,18 +105,18 @@ public void testReadLock(TestInfo testInfo) throws Exception { final AutoCloseableLock writeLock = new AutoCloseableLock( readWriteLock.writeLock()); try (AutoCloseableLock lock = readLock.acquire()) { - Thread competingReadThread = new Thread() { + HadoopThread competingReadThread = new HadoopThread() { @Override - public void run() { + public void work() { assertTrue(readLock.tryLock()); readLock.release(); } }; competingReadThread.start(); competingReadThread.join(); - Thread competingWriteThread = new Thread() { + HadoopThread competingWriteThread = new HadoopThread() { @Override - public void run() { + public void work() { assertFalse(writeLock.tryLock()); } }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java index cee0fcef092f2..a8fafe7c0c22c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java @@ -29,6 +29,7 @@ import java.util.zip.CRC32; import java.util.zip.Checksum; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -316,7 +317,7 @@ private static BenchResult doBench(Class clazz, final int numThreads, final byte[] bytes, final int size) 
throws Exception { - final Thread[] threads = new Thread[numThreads]; + final HadoopThread[] threads = new HadoopThread[numThreads]; final BenchResult[] results = new BenchResult[threads.length]; { @@ -326,11 +327,11 @@ private static BenchResult doBench(Class clazz, for(int i = 0; i < threads.length; i++) { final int index = i; - threads[i] = new Thread() { + threads[i] = new HadoopThread() { final Checksum crc = ctor.newInstance(); @Override - public void run() { + public void work() { final long st = System.nanoTime(); crc.reset(); for (int i = 0; i < trials; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index 7d8bece0d675d..f106dac28f55e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -70,11 +71,11 @@ private void doTestCache() { @Test public void testThreadSafe() throws Exception { - Thread[] th = new Thread[32]; + HadoopThread[] th = new HadoopThread[32]; for (int i=0; i void waitForAll(List> furtures) throws Exception { } } - static class AllocatorThread extends Thread { + static class AllocatorThread extends HadoopThread { private final ByteArrayManager bam; private final int arrayLength; private byte[] array; @@ -237,7 +238,7 @@ static class AllocatorThread extends Thread { } @Override - public void run() { + public void work() { try { array = bam.newByteArray(arrayLength); } catch (InterruptedException e) { @@ -333,9 +334,9 @@ 
public void testByteArrayManager() throws Exception { } final List exceptions = new ArrayList(); - final Thread randomRecycler = new Thread() { + final Thread randomRecycler = new HadoopThread() { @Override - public void run() { + public void work() { LOG.info("randomRecycler start"); for(int i = 0; shouldRun(); i++) { final int j = ThreadLocalRandom.current().nextInt(runners.length); @@ -524,7 +525,7 @@ public void run() { Thread start(int n) { this.n = n; - final Thread t = new Thread(this); + final Thread t = new HadoopThread(this); t.start(); return t; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java index cbbcccf3ca0ba..d19e23d85d1cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java @@ -22,6 +22,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +48,7 @@ public AsyncDataService() { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new Thread(threadGroup, r); + return new HadoopThread(threadGroup, r); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index 70ae4b29e9f96..591776e2090be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -246,7 +246,7 @@ void 
shouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { while (shouldRun) { scan(streamTimeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java index 46c16d3c7fa60..48aae61bac06a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; +import org.apache.hadoop.util.concurrent.HadoopThread; // TODO: convert this to Junit public class TestUdpServer { @@ -68,16 +69,16 @@ public static void main(String[] args) throws InterruptedException { //testDump(); } - static class Runtest1 extends Thread { + static class Runtest1 extends HadoopThread { @Override - public void run() { + public void work() { testGetportMount(); } } - static class Runtest2 extends Thread { + static class Runtest2 extends HadoopThread { @Override - public void run() { + public void work() { testDump(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java index 91af1ca06ac7e..3a9a17155416c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import 
org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,7 +98,7 @@ private synchronized void updateSubclusterMapping() { if (subclusterMapping == null || (monotonicNow() - lastUpdated) > minUpdateTime) { // Fetch the mapping asynchronously - Thread updater = new Thread(new Runnable() { + Thread updater = new HadoopThread(new Runnable() { @Override public void run() { final MembershipStore membershipStore = getMembershipStore(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 2ffc5f0b5d893..414936f8d9729 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.eclipse.jetty.util.ajax.JSON; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -458,7 +459,7 @@ public void run() { /** * Thread that creates connections asynchronously. */ - static class ConnectionCreator extends Thread { + static class ConnectionCreator extends HadoopThread { /** If the creator is running. */ private boolean running = true; /** Queue to push work to. 
*/ @@ -470,7 +471,7 @@ static class ConnectionCreator extends Thread { } @Override - public void run() { + public void work() { while (this.running) { try { ConnectionPool pool = this.queue.take(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index 40ff843fa1dfe..edf108bad8393 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -25,13 +25,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for updating mount table cache on all the router. */ -public class MountTableRefresherThread extends Thread { +public class MountTableRefresherThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(MountTableRefresherThread.class); private boolean success; @@ -61,7 +62,7 @@ public MountTableRefresherThread(MountTableManager manager, * update cache on R2 and R3. 
*/ @Override - public void run() { + public void work() { try { SecurityUtil.doAsLoginUser(() -> { if (UserGroupInformation.isSecurityEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 14cc47ffa1e6e..418e209a4fce5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -59,6 +59,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -391,9 +392,9 @@ protected void serviceStop() throws Exception { * Shutdown the router. 
*/ public void shutDown() { - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { Router.this.stop(); } }.start(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index 5607ab8109d26..fb3bd83a521a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,7 +64,7 @@ public RouterHeartbeatService(Router router) { * Trigger the update of the Router state asynchronously. 
*/ protected void updateStateAsync() { - Thread thread = new Thread(this::updateStateStore, "Router Heartbeat Async"); + Thread thread = new HadoopThread(this::updateStateStore, "Router Heartbeat Async"); thread.setDaemon(true); thread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 77bebab4ade71..fd1059d168a8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -213,6 +213,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.checkerframework.checker.nullness.qual.NonNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -2507,7 +2508,7 @@ private static class AsyncThreadFactory implements ThreadFactory { @Override public Thread newThread(@NonNull Runnable r) { - Thread thread = new Thread(r, namePrefix + threadNumber.getAndIncrement()); + Thread thread = new HadoopThread(r, namePrefix + threadNumber.getAndIncrement()); thread.setDaemon(true); return thread; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 3650ce60f7221..d12d9dfcc9c19 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -133,7 +134,7 @@ public void testConcurrentRefreshRequests() throws InterruptedException { // Spawn 100 concurrent refresh requests Thread[] threads = new Thread[100]; for (int i = 0; i < 100; i++) { - threads[i] = new Thread(() -> + threads[i] = new HadoopThread(() -> client.refreshFairnessPolicyController(routerContext.getConf())); } @@ -182,7 +183,7 @@ public void testRefreshStaticChangeHandlers() throws Exception { final int newNs1Permits = 4; conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns0", newNs0Permits); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns1", newNs1Permits); - Thread threadRefreshController = new Thread(() -> client. + Thread threadRefreshController = new HadoopThread(() -> client. 
refreshFairnessPolicyController(routerContext.getConf())); threadRefreshController.start(); threadRefreshController.join(); @@ -218,7 +219,7 @@ private List makeDummyInvocations(RouterRpcClient client, final int nThr RemoteMethod dummyMethod = Mockito.mock(RemoteMethod.class); List threadAcquirePermits = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { - Thread threadAcquirePermit = new Thread(() -> { + Thread threadAcquirePermit = new HadoopThread(() -> { try { client.invokeSingle(namespace, dummyMethod); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index b43c87591d76c..f7345c525e15b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -318,7 +319,7 @@ public void testCounter() throws Exception { int expectedSchedulerCount = rpcServer.getSchedulerJobCount() + 1; AtomicInteger maxSchedulerCount = new AtomicInteger(); AtomicBoolean watch = new AtomicBoolean(true); - Thread watcher = new Thread(() -> { + Thread watcher = new HadoopThread(() -> { while (watch.get()) { int schedulerCount = rpcServer.getSchedulerJobCount(); if (schedulerCount > maxSchedulerCount.get()) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java index 373743299f032..3857b8ebbfacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -324,7 +324,7 @@ protected MountTableRefresherThread getLocalRefresher( String adminAddress) { return new MountTableRefresherThread(null, adminAddress) { @Override - public void run() { + public void work() { try { // Sleep 1 minute Thread.sleep(60000); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index ddbfdc9727c3a..381eabd791e2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -152,6 +152,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * The the RPC interface of the {@link Router} implemented by @@ -2393,7 +2394,7 @@ public void testCallerContextNotResetByAsyncHandler() throws IOException { String dirPath = "/test"; // The reason we start this child thread is that CallContext use InheritableThreadLocal. - Thread t1 = new Thread(() -> { + HadoopThread t1 = new HadoopThread(() -> { // Set flag async:true. 
CallerContext.setCurrent( new CallerContext.Builder("async:true").build()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java index 805b955661d5c..3a46b04b9420f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java @@ -24,6 +24,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.apache.hadoop.util.concurrent.HadoopThread; + /** * SyncClass implements BaseClass, providing a synchronous * version of the methods. All operations are performed in a @@ -186,7 +188,7 @@ public String timeConsumingMethod(int input) { private ExecutorService getExecutorService() { return Executors.newFixedThreadPool(2, r -> { - Thread t = new Thread(r); + HadoopThread t = new HadoopThread(r); t.setDaemon(true); return t; }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d1f02c47e90b8..13e6364774fdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -130,7 +130,7 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; @@ 
-3960,7 +3960,7 @@ public void processMisReplicatedBlocks() { reconstructionQueuesInitializer = new Daemon() { @Override - public void run() { + public void work() { try { processMisReplicatesAsync(); } catch (InterruptedException ie) { @@ -5641,7 +5641,7 @@ public int getBlockOpQueueLength() { return blockReportThread.queue.size(); } - private class BlockReportProcessingThread extends Thread { + private class BlockReportProcessingThread extends HadoopThread { private long lastFull = 0; private final BlockingQueue queue; @@ -5653,7 +5653,7 @@ private class BlockReportProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 83c179bfe653f..496e2a2f4d933 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +66,7 @@ * starts up, and at configurable intervals afterwards. 
*/ @InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationMonitor extends Thread implements Closeable { +public class CacheReplicationMonitor extends HadoopThread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(CacheReplicationMonitor.class); @@ -159,7 +160,7 @@ public CacheReplicationMonitor(FSNamesystem namesystem, } @Override - public void run() { + public void work() { long startTimeMs = 0; Thread.currentThread().setName("CacheReplicationMonitor(" + System.identityHashCode(this) + ")"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 798b5fb5966f7..c8d793b745049 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -152,7 +153,7 @@ public void checkAndUpdateReportIfNecessary() { public void updateSlowDiskReportAsync(long now) { if (isUpdateInProgress.compareAndSet(false, true)) { lastUpdateTime = now; - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 93303bcf807de..c3269b923fb1e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -53,7 +53,7 @@ import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -849,8 +849,8 @@ private void deleteAsync(File curDir) throws IOException { deleteDir(curTmp); } rename(curDir, curTmp); - new Thread("Async Delete Current.tmp") { - public void run() { + new HadoopThread("Async Delete Current.tmp") { + public void work() { try { deleteDir(curTmp); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 45eeac85d6b36..31acda1f703a6 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -75,6 +75,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.apache.hadoop.classification.VisibleForTesting; @@ -599,7 +600,7 @@ void start() { //Thread is started already return; } - bpThread = new Thread(this); + bpThread = new HadoopThread(this); bpThread.setDaemon(true); // needed for JUnit testing if (lifelineSender != null) { @@ -1078,7 +1079,7 @@ public void run() { } public void start() { - lifelineThread = new Thread(this, + lifelineThread = new HadoopThread(this, formatThreadName("lifeline", 
lifelineNnAddr)); lifelineThread.setDaemon(true); lifelineThread.setUncaughtExceptionHandler( @@ -1384,7 +1385,7 @@ public long monotonicNow() { /** * CommandProcessingThread that process commands asynchronously. */ - class CommandProcessingThread extends Thread { + class CommandProcessingThread extends HadoopThread { private final BPServiceActor actor; private final BlockingQueue queue; @@ -1396,7 +1397,7 @@ class CommandProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index ef778791cfd9c..4fde1d992d1c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -262,6 +262,7 @@ import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tracing.Tracer; import org.eclipse.jetty.util.ajax.JSON; @@ -3855,8 +3856,8 @@ public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException // Asynchronously start the shutdown process so that the rpc response can be // sent back. - Thread shutdownThread = new Thread("Async datanode shutdown thread") { - @Override public void run() { + Thread shutdownThread = new HadoopThread("Async datanode shutdown thread") { + @Override public void work() { if (!shutdownForUpgrade) { // Delay the shutdown a bit if not doing for restart. 
try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index d8f1e23ec379b..35230be5aebc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +53,7 @@ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. *

They are all managed by the DataNode's BlockScanner. */ -public class VolumeScanner extends Thread { +public class VolumeScanner extends HadoopThread { public static final Logger LOG = LoggerFactory.getLogger(VolumeScanner.class); @@ -633,7 +634,7 @@ private synchronized ExtendedBlock popNextSuspectBlock() { } @Override - public void run() { + public void work() { // Record the minute on which the scanner started. this.startMinute = TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index e5b23bb60e516..2aa5319c6e8f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -109,7 +110,7 @@ public Thread newThread(Runnable r) { synchronized (this) { thisIndex = counter++; } - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName("Async disk worker #" + thisIndex + " for volume " + volume); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index 262a24bd3aa45..a9ecdd46bcb8e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -49,6 +49,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; class FsVolumeList { private final CopyOnWriteArrayList volumes = @@ -260,8 +261,8 @@ void getAllVolumesMap(final String bpid, new ConcurrentHashMap(); List replicaAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new HadoopThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid + " on volume " + v + "..."); @@ -507,8 +508,8 @@ void addBlockPool(final String bpid, final Configuration conf) throws IOExceptio new ConcurrentHashMap(); List blockPoolAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new HadoopThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Scanning block pool " + bpid + " on volume " + v + "..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java index 0d42ae99e358e..e295db58d67b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -82,7 +83,7 @@ private void addExecutorForVolume(final String storageId) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(threadGroup, r); + Thread t = new HadoopThread(threadGroup, r); t.setName("Async RamDisk lazy persist worker " + " for volume with id " + storageId); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index 29b262598bf55..fb63a13fb33b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -128,7 +128,7 @@ void shutdown() { // The main work loop // @Override - public void run() { + public void work() { // How often to check the size of the edit log (min of checkpointCheckPeriod and checkpointPeriod) long periodMSec = checkpointConf.getCheckPeriod() * 1000; // How often to checkpoint regardless of number of txns diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 115e9485fa0a9..8106334307630 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -78,7 +79,7 @@ private boolean isSyncThreadAlive() { private void startSyncThread() { synchronized(syncThreadLock) { if (!isSyncThreadAlive()) { - syncThread = new Thread(this, this.getClass().getSimpleName()); + syncThread = new HadoopThread(this, this.getClass().getSimpleName()); syncThread.start(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index fa321fe85e57b..40ff0829730f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -76,7 +76,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Preconditions; @@ -1247,7 +1247,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { StorageDirectory sd = it.next(); FSImageSaver saver = new FSImageSaver(ctx, sd, nnf); - Thread saveThread = new Thread(saver, saver.toString()); + Thread saveThread = new HadoopThread(saver, saver.toString()); saveThreads.add(saveThread); saveThread.start(); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index edacb7eaafd00..d69ca71759f2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -75,6 +75,7 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -184,7 +185,7 @@ public LoaderContext getLoaderContext() { * Thread to compute the MD5 of a file as this can be in parallel while * loading the image without interfering much. */ - private static class DigestThread extends Thread { + private static class DigestThread extends HadoopThread { /** * Exception thrown when computing the digest if it cannot be calculated. 
@@ -219,7 +220,7 @@ public IOException getException() { } @Override - public void run() { + public void work() { try { digest = MD5FileUtils.computeMd5ForFile(file); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index d48941203d3f0..47bb47466d6bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -103,7 +103,7 @@ import org.apache.hadoop.util.GcTimeMonitor.Builder; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.util.Timer; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1076,7 +1076,7 @@ public FileSystem run() throws IOException { return dfs; } }); - this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); + this.emptier = new HadoopThread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index cf416307f47d4..f4c0252a88329 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -37,6 +37,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.Timer; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -475,7 +476,7 @@ void sleep(long sleepTimeMillis) throws InterruptedException { * The thread which does the actual work of tailing edits journals and * applying the transactions to the FSNS. */ - private class EditLogTailerThread extends Thread { + private class EditLogTailerThread extends HadoopThread { private volatile boolean shouldRun = true; private EditLogTailerThread() { @@ -487,7 +488,7 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { SecurityUtil.doAsLoginUserOrFatal( new PrivilegedAction() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index 8426bbe33023a..d310db425abe4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -50,6 +50,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; @@ -386,7 +387,7 @@ private long countUncheckpointedTxns() { img.getStorage().getMostRecentCheckpointTxId(); } - private class CheckpointerThread extends Thread { + private class CheckpointerThread extends HadoopThread { private volatile boolean shouldRun = true; private volatile long preventCheckpointsUntil = 0; @@ -399,7 +400,7 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { // 
We have to make sure we're logged in as far as JAAS // is concerned, in order to use kerberized SSL properly. SecurityUtil.doAsLoginUserOrFatal( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index 9a4d866e117d1..bc43045bbaeef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -51,6 +51,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -448,7 +449,7 @@ void start() { Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING)); if (thread.get() == null) { - final Thread t = new Thread(null, new Runnable() { + final Thread t = new HadoopThread(null, new Runnable() { @Override public void run() { for(State s; !(s = checkErrorState()).isTerminated;) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index f277b1a37b8d6..2335202111c43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import 
org.mockito.Mockito; @@ -521,8 +522,8 @@ public boolean skipRollingRestartWait() { .getWrappedStream(); final AtomicBoolean running = new AtomicBoolean(true); final AtomicBoolean failed = new AtomicBoolean(false); - Thread t = new Thread() { - public void run() { + HadoopThread t = new HadoopThread() { + public void work() { while (running.get()) { try { out.write("test".getBytes()); @@ -866,7 +867,7 @@ public Boolean get() { dataNodes[0].shutdown(); // Shutdown the second datanode when the pipeline is closing. - new Thread(() -> { + new HadoopThread(() -> { try { GenericTestUtils.waitFor(new Supplier() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 8eb2f588228f0..8027487f1a639 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -90,6 +90,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -729,7 +730,7 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in Counter counter = new Counter(0); for (int i = 0; i < threads; ++i ) { DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter); - readers[i] = new Thread(reader); + readers[i] = new HadoopThread(reader); readers[i].start(); } @@ -1018,7 +1019,7 @@ public static void namenodeRestartTest(final Configuration conf, assertFalse(HdfsUtils.isHealthy(uri)); //namenode is down, continue writing file4 in a thread - final Thread file4thread = new Thread(new Runnable() { + final Thread 
file4thread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1037,7 +1038,7 @@ public void run() { file4thread.start(); //namenode is down, read the file in a thread - final Thread reader = new Thread(new Runnable() { + final Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1057,7 +1058,7 @@ public void run() { //namenode is down, create another file in a thread final Path file3 = new Path(dir, "file"); - final Thread thread = new Thread(new Runnable() { + final Thread thread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1072,7 +1073,7 @@ public void run() { thread.start(); //restart namenode in a new thread - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1125,7 +1126,7 @@ public void run() { assertFalse(HdfsUtils.isHealthy(uri)); //leave safe mode in a new thread - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -1306,7 +1307,7 @@ public void delayWhenRenewLeaseTimeout() { out1.write(new byte[256]); - Thread closeThread = new Thread(new Runnable() { + Thread closeThread = new HadoopThread(new Runnable() { @Override public void run() { try { //1. 
trigger get LeaseRenewer lock diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java index d52b53d543206..b4b40197c278d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java @@ -60,6 +60,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -339,7 +340,7 @@ public void testCongestionAckDelay() { AtomicBoolean isDelay = new AtomicBoolean(true); // ResponseProcessor needs the dataQueue for the next step. - new Thread(() -> { + new HadoopThread(() -> { for (int i = 0; i < 10; i++) { // In order to ensure that other threads run for a period of time to prevent affecting // the results. @@ -376,7 +377,7 @@ public void testCongestionAckDelay() { // The purpose of adding packets to the dataQueue is to make the DataStreamer run // normally and judge whether to enter the sleep state according to the congestion. 
- new Thread(() -> { + new HadoopThread(() -> { for (int i = 0; i < 100; i++) { packet[i] = mock(DFSPacket.class); dataQueue.add(packet[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 2598dd44a374f..df349550ac487 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -70,6 +70,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.slf4j.event.Level; @@ -948,9 +949,9 @@ public void testTailWithFresh() throws Exception { final ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); - final Thread tailer = new Thread() { + final HadoopThread tailer = new HadoopThread() { @Override - public void run() { + public void work() { final String[] argv = new String[]{"-tail", "-f", testFile.toString()}; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index c9c7020ed6c95..7e2c1e4aa9068 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import 
org.slf4j.event.Level; @@ -61,7 +62,7 @@ public class TestDatanodeDeath { // // an object that does a bunch of transactions // - static class Workload extends Thread { + static class Workload extends HadoopThread { private final short replication; private final int numberOfFiles; private final int id; @@ -81,7 +82,7 @@ static class Workload extends Thread { // create a bunch of files. Write to them and then verify. @Override - public void run() { + public void work() { System.out.println("Workload starting "); for (int i = 0; i < numberOfFiles; i++) { Path filename = new Path(id + "." + i); @@ -210,7 +211,7 @@ private static void checkData(byte[] actual, int from, byte[] expected, String m * a block do not get killed (otherwise the file will be corrupt and the * test will fail). */ - class Modify extends Thread { + class Modify extends HadoopThread { volatile boolean running; final MiniDFSCluster cluster; final Configuration conf; @@ -222,7 +223,7 @@ class Modify extends Thread { } @Override - public void run() { + public void work() { while (running) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index df3fc4f8c4370..dfe072b41efca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -456,7 +457,7 @@ public void sync() { } private void startWaitForDeadNodeThread(DFSClient dfsClient, int size) { - new Thread(() -> { + new 
HadoopThread(() -> { DeadNodeDetector deadNodeDetector = dfsClient.getClientContext().getDeadNodeDetector(); while (deadNodeDetector.clearAndGetDetectedDeadNodes().size() != size) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 63f8dc226980b..cc93bd2b3bf88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -86,6 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -864,7 +865,7 @@ public void testDecommissionWithOpenfileReporting() closedFileSet, openFilesMap, maxDnOccurance); final AtomicBoolean stopRedundancyMonitor = new AtomicBoolean(false); - Thread monitorThread = new Thread(new Runnable() { + Thread monitorThread = new HadoopThread(new Runnable() { @Override public void run() { while (!stopRedundancyMonitor.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index 16e1ea25b4b7a..164f677e231ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; @@ -259,8 +260,8 @@ public void testDecommissionWithURBlockForSameBlockGroup() throws Exception { // Decommission node in a new thread. Verify that node is decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - Thread decomTh = new Thread() { - public void run() { + HadoopThread decomTh = new HadoopThread() { + public void work() { try { decomStarted.countDown(); decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED); @@ -995,7 +996,7 @@ public void testDecommissionWithMissingBlock() throws Exception { // Handle decommission nodes in a new thread. // Verify that nodes are decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - new Thread( + new HadoopThread( () -> { try { decomStarted.countDown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index b7916ec560730..d6e3881db8c62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -42,6 +42,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -372,7 +373,7 @@ public void testSimpleAppend2() throws Exception { // // an object that does a bunch of appends to files // - class Workload extends Thread { + class Workload extends HadoopThread { private final int id; private final MiniDFSCluster cluster; private final boolean appendToNewBlock; @@ -385,7 +386,7 @@ class Workload extends Thread { // create a bunch of files. 
Write to them and then verify. @Override - public void run() { + public void work() { System.out.println("Workload " + id + " starting... "); for (int i = 0; i < numAppendsPerThread; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index 5b3a96305a77d..f1906bc021a4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.mockito.invocation.InvocationOnMock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -552,9 +553,9 @@ public HdfsFileStatus answer(InvocationOnMock invocation){ DFSClientAdapter.setDFSClient(fs, spyClient); // Create two threads for doing appends to the same file. 
- Thread worker1 = new Thread() { + HadoopThread worker1 = new HadoopThread() { @Override - public void run() { + public void work() { try { doSmallAppends(file, fs, 20); } catch (IOException e) { @@ -562,9 +563,9 @@ public void run() { } }; - Thread worker2 = new Thread() { + HadoopThread worker2 = new HadoopThread() { @Override - public void run() { + public void work() { try { doSmallAppends(file, fs, 20); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index b10439b248bfb..b2a3bac54f830 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -165,9 +166,9 @@ public void testRecoverFinalizedBlock() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { try { stm.close(); } catch (Throwable t) { @@ -237,9 +238,9 @@ public void testCompleteOtherLeaseHoldersFile() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { try { stm.close(); } catch (Throwable t) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index a8d3c52fc6fef..77b640a532b85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -221,7 +222,7 @@ public void testImmediateReadOfNewFile() final AtomicReference errorMessage = new AtomicReference(); final FSDataOutputStream out = fileSystem.create(file); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -241,7 +242,7 @@ public void run() { } }); - Thread opener = new Thread(new Runnable() { + Thread opener = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -346,7 +347,7 @@ private void runTestUnfinishedBlockCRCError( final AtomicBoolean writerStarted = new AtomicBoolean(false); final AtomicBoolean error = new AtomicBoolean(false); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -379,7 +380,7 @@ public void run() { } } }); - Thread tailer = new Thread(new Runnable() { + Thread tailer = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java 
index 41b5b340c8805..35eef25724ed0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -113,7 +114,7 @@ public void testClientTriggeredLeaseRecovery() throws Exception { } } - static class SlowWriter extends Thread { + static class SlowWriter extends HadoopThread { final FileSystem fs; final Path filepath; boolean running = true; @@ -125,7 +126,7 @@ static class SlowWriter extends Thread { } @Override - public void run() { + public void work() { FSDataOutputStream out = null; int i = 0; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java index 48666f68dfdd6..9bef9542ae666 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java @@ -35,6 +35,7 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; /** @@ -71,7 +72,7 @@ private void initBuffer(int size) { toWrite = AppendTestUtil.randomBytes(seed, size); } - private class WriterThread extends Thread { + private class WriterThread extends HadoopThread { private final FSDataOutputStream stm; private final AtomicReference thrown; private final int numWrites; @@ -87,7 +88,7 @@ public 
WriterThread(FSDataOutputStream stm, } @Override - public void run() { + public void work() { try { countdown.await(); for (int i = 0; i < numWrites && thrown.get() == null; i++) { @@ -162,9 +163,9 @@ public void testHflushWhileClosing() throws Throwable { final AtomicReference thrown = new AtomicReference(); try { for (int i = 0; i < 10; i++) { - Thread flusher = new Thread() { + HadoopThread flusher = new HadoopThread() { @Override - public void run() { + public void work() { try { while (true) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index 06d8aec4ffcbd..a7d2472bba928 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -189,7 +190,7 @@ public int pRead(DFSInputStream dis, byte[] target, int startOff, int len) /** * A worker to do one "unit" of read. */ - static class ReadWorker extends Thread { + static class ReadWorker extends HadoopThread { static public final int N_ITERATIONS = 1024; @@ -215,7 +216,7 @@ static class ReadWorker extends Thread { * Randomly do one of (1) Small read; and (2) Large Pread. 
*/ @Override - public void run() { + public void work() { for (int i = 0; i < N_ITERATIONS; ++i) { int startOff = rand.nextInt((int) fileSize); int len = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java index 8f5ba9018dfa7..cfda31d91d1bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.log4j.Level; import org.junit.jupiter.api.Assertions; @@ -159,7 +160,7 @@ public void testInterruptReader() throws Exception { final FSDataInputStream in = fs.open(file); AtomicBoolean readInterrupted = new AtomicBoolean(false); - final Thread reader = new Thread(new Runnable() { + final Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java index a52eba866c07f..f47b5bbb746e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -244,7 +245,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime * 1000L); } - static class SlowWriter extends Thread { + static class SlowWriter extends HadoopThread { private final Path filepath; private final HdfsDataOutputStream out; private final long sleepms; @@ -258,7 +259,7 @@ static class SlowWriter extends Thread { this.sleepms = sleepms; } - @Override public void run() { + @Override public void work() { int i = 0; try { sleep(sleepms); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index e2df609095567..bfc4bbcefcb88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -222,7 +223,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime * 1000L); } - static class SlowWriter extends Thread { + static class SlowWriter extends HadoopThread { final Path filepath; final HdfsDataOutputStream out; final long sleepms; @@ -237,7 +238,7 @@ static class SlowWriter extends Thread { } @Override - public void run() { + public void work() { int i = 0; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index 810f7e1864d17..fb131fe780acf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -73,6 +73,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -235,7 +236,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } Thread.sleep(500); @@ -334,7 +335,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } gotFailureLatch.await(); @@ -640,7 +641,7 @@ public void run() { } } }; - Thread thread = new Thread(readerRunnable); + Thread thread = new HadoopThread(readerRunnable); thread.start(); // While the thread is reading, send it interrupts. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java index 230945d09631c..a81db9bc6f62a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java @@ -34,7 +34,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.VersionInfo; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.Test; import java.lang.management.ManagementFactory; @@ -105,7 +105,7 @@ private long addOneDataNode(Configuration conf) throws Exception { } private Thread newBalancerService(Configuration conf, String[] args) { - return new Thread(new Runnable() { + return new HadoopThread(new Runnable() { @Override public void run() { Tool cli = new Balancer.Cli(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index b6f27ca0c2329..8d6fc050a5dc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; @@ -1522,7 +1523,7 @@ public void 
testAsyncIBR() throws Exception { Thread[] writers = new Thread[numWriters]; for (int i=0; i < writers.length; i++) { final Path p = new Path("/writer"+i); - writers[i] = new Thread(new Runnable() { + writers[i] = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index 0dfe3ae509752..38a1494c486db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -70,6 +70,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -915,7 +916,7 @@ private Block findBlock(Path path, long size) throws IOException { return ret; } - private class BlockChecker extends Thread { + private class BlockChecker extends HadoopThread { final Path filePath; public BlockChecker(final Path filePath) { @@ -923,7 +924,7 @@ public BlockChecker(final Path filePath) { } @Override - public void run() { + public void work() { try { startDNandWait(filePath, true); } catch (Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index da89b85f6de78..9ef31ccc5863f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -87,6 +87,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -337,7 +338,7 @@ public void blockUtilSendFullBlockReport() { }); countBlockReportItems(FAKE_BLOCK, mockNN1, blocks); - addNewBlockThread = new Thread(() -> { + addNewBlockThread = new HadoopThread(() -> { for (int i = 0; i < totalTestBlocks; i++) { SimulatedFSDataset fsDataset = (SimulatedFSDataset) mockFSDataset; SimulatedStorage simulatedStorage = fsDataset.getStorages().get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 570d41a69dba4..2afc98e8c36c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -96,6 +96,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -917,7 +918,7 @@ private void testStopWorker(final TestStopWorkerRunnable tswr) final RecoveringBlock recoveringBlock = Iterators.get(recoveringBlocks.iterator(), 0); final ExtendedBlock block = recoveringBlock.getBlock(); - Thread slowWriterThread = new Thread(new Runnable() { + Thread slowWriterThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ 
-944,7 +945,7 @@ public void run() { progressParent.uninterruptiblyAcquire(60000); // Start a worker thread which will attempt to stop the writer. - Thread stopWriterThread = new Thread(new Runnable() { + Thread stopWriterThread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 74d18b55c6cdc..7365e1076e70b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.TestName; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -248,7 +249,7 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() final DataNode dataNode = cluster.getDataNodes().get(0); final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); - Thread recoveryThread = new Thread(() -> { + Thread recoveryThread = new HadoopThread(() -> { try { DatanodeInfo[] locations = block.getLocations(); final BlockRecoveryCommand.RecoveringBlock recoveringBlock = @@ -367,7 +368,7 @@ public void testEcRecoverBlocks() throws Throwable { // write 5MB File AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); final AtomicReference err = new AtomicReference<>(); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { stm.close(); } catch (Throwable t1) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index babce8d5833cf..b27faa1259d1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -51,6 +51,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -567,7 +568,7 @@ public void testAddVolumesConcurrently() // Thread to list all storage available at DataNode, // when the volumes are being added in parallel. - final Thread listStorageThread = new Thread(new Runnable() { + final HadoopThread listStorageThread = new HadoopThread(new Runnable() { @Override public void run() { while (addVolumeCompletionLatch.getCount() != newVolumeCount) { @@ -591,7 +592,7 @@ public void run() { public Object answer(InvocationOnMock invocationOnMock) throws Throwable { final Random r = new Random(); Thread addVolThread = - new Thread(new Runnable() { + new HadoopThread(new Runnable() { @Override public void run() { try { @@ -928,7 +929,7 @@ public void logDelaySendingAckToUpstream( final DataNode dataNode = dn; final CyclicBarrier reconfigBarrier = new CyclicBarrier(2); - Thread reconfigThread = new Thread(() -> { + Thread reconfigThread = new HadoopThread(() -> { try { reconfigBarrier.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index f4d66f8c8d001..a20bf94b2ac07 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -82,6 +82,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -441,7 +442,7 @@ public void delayWhenOfferServiceHoldLock() { BPServiceActor actor = service.getBPServiceActors().get(0); DatanodeRegistration bpRegistration = actor.getBpRegistration(); - Thread register = new Thread(() -> { + Thread register = new HadoopThread(() -> { try { service.registrationSucceeded(actor, bpRegistration); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java index 81f6020088965..15c7f71f0922f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -85,7 +86,7 @@ public void testBaseFunc() { @Test @Timeout(value = 5) public void testAcquireWriteLockError() throws InterruptedException { - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { 
manager.readLock(LockLevel.BLOCK_POOl, "test"); manager.writeLock(LockLevel.BLOCK_POOl, "test"); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java index c6b5592c3c01d..1e5b979789c8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java @@ -34,6 +34,7 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -95,7 +96,7 @@ public NullDataNode(Configuration conf, OutputStream out, int port) throws any(StorageType.class), any(String.class), any(ExtendedBlock.class), anyBoolean()); - new Thread(new NullServer(port)).start(); + new HadoopThread(new NullServer(port)).start(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index 200c30a5abb0e..6b4f4116a06ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.DataChecksum; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -381,7 +382,7 @@ public void testConcurrentAddBlockPool() throws InterruptedException, IOException { final String[] bpids = {"BP-TEST1-", "BP-TEST2-"}; final SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf); - class AddBlockPoolThread extends Thread { + class AddBlockPoolThread extends HadoopThread { private int id; private IOException ioe; public AddBlockPoolThread(int id) { @@ -394,7 +395,7 @@ public void test() throws InterruptedException, IOException { throw ioe; } } - public void run() { + public void work() { for (int i=0; i < 10000; i++) { // add different block pools concurrently String newbpid = bpids[id] + i; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 883290ef41c4e..df7e08fd8098b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -84,6 +84,7 @@ import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -648,9 +649,9 @@ public void testConcurrentWriteAndDeleteBlock() throws Exception { Random random = new Random(); // Random write block and delete half of them. 
for (int i = 0; i < threadCount; i++) { - Thread thread = new Thread() { + HadoopThread thread = new HadoopThread() { @Override - public void run() { + public void work() { try { String bpid = BLOCK_POOL_IDS[random.nextInt(BLOCK_POOL_IDS.length)]; for (int blockId = 0; blockId < numBlocks; blockId++) { @@ -931,8 +932,8 @@ public void testRemoveVolumeBeingWritten() throws Exception { final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1); final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1); final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1); - class BlockReportThread extends Thread { - public void run() { + class BlockReportThread extends HadoopThread { + public void work() { // Lets wait for the volume remove process to start try { volRemoveStartedLatch.await(); @@ -946,8 +947,8 @@ public void run() { } } - class ResponderThread extends Thread { - public void run() { + class ResponderThread extends HadoopThread { + public void work() { try (ReplicaHandler replica = dataset .createRbw(StorageType.DEFAULT, null, eb, false)) { LOG.info("CreateRbw finished"); @@ -973,8 +974,8 @@ public void run() { } } - class VolRemoveThread extends Thread { - public void run() { + class VolRemoveThread extends HadoopThread { + public void work() { Set volumesToRemove = new HashSet<>(); try { volumesToRemove.add(dataset.getVolume(eb).getStorageLocation()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java index 6c00e9690bb91..b4a06570f1c71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java @@ -42,6 +42,7 @@ import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -386,9 +387,9 @@ public void testAddRplicaProcessorForAddingReplicaInMap() throws Exception { ExecutorService pool = Executors.newFixedThreadPool(10); List> futureList = new ArrayList<>(); for (int i = 0; i < 100; i++) { - Thread thread = new Thread() { + HadoopThread thread = new HadoopThread() { @Override - public void run() { + public void work() { for (int j = 0; j < 10; j++) { try { DFSTestUtil.createFile(fs, new Path("File_" + getName() + j), 10, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index f1f57a9714f02..c359edb649f60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -196,7 +197,7 @@ public void run() { Thread threads[] = new Thread[NUM_TASKS]; for (int i = 0; i < NUM_TASKS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new HadoopThread(readerRunnable); threads[i].start(); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java index c29c2bf1bc855..72783a401c700 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -175,11 +176,11 @@ public void testFsDatasetImplDeepCopyReplica() { modifyThread.setShouldRun(false); } - private class ModifyThread extends Thread { + private class ModifyThread extends HadoopThread { private boolean shouldRun = true; @Override - public void run() { + public void work() { FSDataOutputStream os = null; while (shouldRun) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java index ef84c1732d5a9..bbb3c0552354a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java @@ -552,7 +552,7 @@ private static class Writer extends Daemon { } @Override - public void run() { + public void work() { 
/** * Create a file, write up to 3 blocks of data and close the file. * Do this in a loop until we are told to stop. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index 073bb532ddf6d..ef3a9e325fb44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -719,8 +720,8 @@ public Object answer(InvocationOnMock invocation) { getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)); final String newDirs = oldDirs.get(0); LOG.info("Reconfigure newDirs:" + newDirs); - Thread reconfigThread = new Thread() { - public void run() { + HadoopThread reconfigThread = new HadoopThread() { + public void work() { try { LOG.info("Waiting for work plan creation!"); createWorkPlanLatch.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index ca29433dbd607..6511b4407cc12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -85,6 +85,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import 
org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; /** @@ -422,7 +423,7 @@ void printStats() { /** * One of the threads that perform stats operations. */ - private class StatsDaemon extends Thread { + private class StatsDaemon extends HadoopThread { private final int daemonId; private int opsPerThread; private String arg1; // argument passed to executeOp() @@ -438,7 +439,7 @@ private class StatsDaemon extends Thread { } @Override - public void run() { + public void work() { localNumOpsExecuted = 0; localCumulativeTime = 0; arg1 = statsOp.getExecutionArgument(daemonId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 61c147e6e5ea7..7ccdd625306b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -39,6 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -315,7 +316,7 @@ public void testAuditLoggerWithCallContext() throws IOException { .build(); CallerContext.setCurrent(context); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - Thread child = new Thread(new Runnable() + Thread child = new HadoopThread(new Runnable() { @Override public void run() { @@ -342,7 +343,7 @@ public void run() { .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING)) .build(); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - child = new Thread(new Runnable() + child = new 
HadoopThread(new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 73a4a52611b44..c75b1e25840a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -88,6 +88,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -2611,7 +2612,7 @@ private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary /** * A utility class to perform a checkpoint in a different thread. 
*/ - private static class DoCheckpointThread extends Thread { + private static class DoCheckpointThread extends HadoopThread { private final SecondaryNameNode snn; private volatile Throwable thrown = null; @@ -2620,7 +2621,7 @@ private static class DoCheckpointThread extends Thread { } @Override - public void run() { + public void work() { try { snn.doCheckpoint(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index d9002f83948f8..68ce8abe4e872 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -59,6 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -108,7 +109,7 @@ private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception { "/"), "s1"); } - Thread deleteThread = new DeleteThread(fs, filePath); + HadoopThread deleteThread = new DeleteThread(fs, filePath); deleteThread.start(); try { @@ -148,7 +149,7 @@ public DatanodeStorageInfo[] chooseTarget(String srcPath, } } - private class DeleteThread extends Thread { + private class DeleteThread extends HadoopThread { private FileSystem fs; private Path path; @@ -158,7 +159,7 @@ private class DeleteThread extends Thread { } @Override - public void run() { + public void work() { try { Thread.sleep(1000); LOG.info("Deleting" + path); @@ -177,7 +178,7 @@ public void run() { } } - private class RenameThread extends Thread { + private class RenameThread extends HadoopThread 
{ private FileSystem fs; private Path from; private Path to; @@ -189,7 +190,7 @@ private class RenameThread extends Thread { } @Override - public void run() { + public void work() { try { Thread.sleep(1000); LOG.info("Renaming " + from + " to " + to); @@ -456,14 +457,14 @@ public void testOpenRenameRace() throws Exception { // 6.release writeLock, it's fair lock so open thread gets read lock. // 7.open thread unlocks, rename gets write lock and does rename. // 8.rename thread unlocks, open thread gets write lock and update time. - Thread open = new Thread(() -> { + Thread open = new HadoopThread(() -> { try { openSem.release(); fsn.getBlockLocations("foo", src, 0, 5); } catch (IOException e) { } }); - Thread rename = new Thread(() -> { + Thread rename = new HadoopThread(() -> { try { openSem.acquire(); renameSem.release(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 84e02d273a89d..b95f994193c9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -90,6 +90,7 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.LogManager; import org.apache.log4j.spi.LoggingEvent; @@ -501,7 +502,7 @@ private void testEditLog(int initialSize) throws IOException { for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS); - threadId[i] = new Thread(trans, "TransactionThread-" + i); + threadId[i] = new HadoopThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 8d3effb511fc2..019118a039d28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -67,6 +67,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.mockito.ArgumentMatcher; import org.slf4j.event.Level; import org.junit.jupiter.api.Test; @@ -205,7 +206,7 @@ private void startTransactionWorkers(MiniDFSCluster cluster, // Create threads and make them run transactions concurrently. for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(cluster, caughtErr); - new Thread(trans, "TransactionThread-" + i).start(); + new HadoopThread(trans, "TransactionThread-" + i).start(); workers.add(trans); } } @@ -425,9 +426,9 @@ public void testSaveImageWhileSyncInProgress() throws Exception { new AtomicReference(); final CountDownLatch waitToEnterFlush = new CountDownLatch(1); - final Thread doAnEditThread = new Thread() { + final HadoopThread doAnEditThread = new HadoopThread() { @Override - public void run() { + public void work() { try { LOG.info("Starting mkdirs"); namesystem.mkdirs("/test", @@ -518,9 +519,9 @@ public void testSaveRightBeforeSync() throws Exception { new AtomicReference(); final CountDownLatch sleepingBeforeSync = new CountDownLatch(1); - final Thread doAnEditThread = new Thread() { + final HadoopThread doAnEditThread = new HadoopThread() { @Override - public void run() { + public void work() { try { LOG.info("Starting setOwner"); namesystem.writeLock(RwLockMode.FS); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index 0ffe35ca3e150..ee4a6ddda8b00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -29,6 +29,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -268,9 +269,9 @@ public void testFSReadLockLongHoldingReport() throws Exception { // Track but do not Report if it's held for a long time when re-entering // read lock but time since last report does not exceed the suppress // warning interval - Thread tLong = new Thread() { + HadoopThread tLong = new HadoopThread() { @Override - public void run() { + public void work() { fsnLock.readLock(); // Add one lock hold which is the longest, but occurs under a different // stack trace, to ensure this is the one that gets logged @@ -298,7 +299,7 @@ public void run() { fsnLock.readUnlock(); // Assert that stack trace eventually logged is the one for the longest hold String stackTracePatternString = - String.format("INFO.+%s(.+\n){5}\\Q%%s\\E\\.run", readLockLogStmt); + String.format("INFO.+%s(.+\n){5}\\Q%%s\\E\\.work", readLockLogStmt); Pattern tLongPattern = Pattern.compile( String.format(stackTracePatternString, tLong.getClass().getName())); assertTrue(tLongPattern.matcher(logs.getOutput()).find()); @@ -318,9 +319,9 @@ public void run() { logs.clearOutput(); final CountDownLatch barrier = new CountDownLatch(1); final CountDownLatch barrier2 = new CountDownLatch(1); - Thread t1 = new Thread() { + HadoopThread t1 = 
new HadoopThread() { @Override - public void run() { + public void work() { try { fsnLock.readLock(); timer.advance(readLockReportingThreshold + 1); @@ -332,9 +333,9 @@ public void run() { } } }; - Thread t2 = new Thread() { @Override - public void run() { + public void work() { try { barrier.await(); // Wait until t1 finishes sleeping fsnLock.readLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java index f18ff3020aa05..4f89d75b55ad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java @@ -41,6 +41,7 @@ import org.apache.hadoop.metrics2.impl.ConfigBuilder; import org.apache.hadoop.metrics2.impl.TestMetricsConfig; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -55,10 +56,10 @@ public class TestFSNamesystemMBean { * JMX properties. If it can access all the properties, the test is * considered successful.
*/ - private static class MBeanClient extends Thread { + private static class MBeanClient extends HadoopThread { private boolean succeeded = false; @Override - public void run() { + public void work() { try { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 6e7ed2e82f40f..af87005630d78 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -68,6 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -255,7 +256,7 @@ public void delay() { DataNodeFaultInjector.set(injector); // Truncate by using different client name. 
- Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; boolean originCacheDisable = conf.getBoolean(hdfsCacheDisableKey, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java index 2dffeaee5bdb9..49817e9a14999 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -165,12 +166,12 @@ protected void execute() throws Throwable { * implementation class, the thread is notified: other threads can wait * for it to terminate */ - private abstract class TestThread extends Thread { + private abstract class TestThread extends HadoopThread { volatile Throwable thrown; protected volatile boolean live = true; @Override - public void run() { + public void work() { try { execute(); } catch (Throwable throwable) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 6c6c2b0008d52..93c85ad50e2c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -56,6 +56,7 @@ 
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.ChunkedArrayList; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -222,7 +223,7 @@ public void testListOpenFilesInHA() throws Exception { final AtomicBoolean failoverCompleted = new AtomicBoolean(false); final AtomicBoolean listOpenFilesError = new AtomicBoolean(false); final int listingIntervalMsec = 250; - Thread clientThread = new Thread(new Runnable() { + Thread clientThread = new HadoopThread(new Runnable() { @Override public void run() { while(!failoverCompleted.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 4387f7679bce3..dfbf0b46686c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -218,7 +219,7 @@ public void testMetaSaveOverwrite() throws Exception { } } - class MetaSaveThread extends Thread { + class MetaSaveThread extends HadoopThread { NamenodeProtocols nnRpc; String filename; public MetaSaveThread(NamenodeProtocols nnRpc, String filename) { @@ -227,7 +228,7 @@ public MetaSaveThread(NamenodeProtocols nnRpc, String filename) { } @Override - public void run() { + public void work() { try { nnRpc.metaSave(filename); } catch (IOException e) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java index a27e4d8676ecb..8dd7a6e3d359f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java @@ -27,6 +27,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.StopWatch; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.test.Whitebox; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -177,8 +178,8 @@ public void testThrottleAccumulatingTasks() throws Exception { zst.addTask(mock); } - Thread removeTaskThread = new Thread() { - public void run() { + HadoopThread removeTaskThread = new HadoopThread() { + public void work() { try { Thread.sleep(3000); } catch (InterruptedException ie) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 368a9a8460010..4eef88fcd3798 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import 
org.mockito.invocation.InvocationOnMock; @@ -136,7 +137,7 @@ public void testEditLog() throws IOException { Thread threadId[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS); - threadId[i] = new Thread(trans, "TransactionThread-" + i); + threadId[i] = new HadoopThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index ffcf72ad9ab07..0313a2e488c21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -62,6 +62,7 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.util.concurrent.HadoopThread; public class TestBootstrapStandby { private static final Logger LOG = @@ -401,7 +402,7 @@ public void testRateThrottling() throws Exception { final int timeOut = (int)(imageFile.length() / minXferRatePerMS) + 1; // A very low DFS_IMAGE_TRANSFER_RATE_KEY value won't affect bootstrapping final AtomicBoolean bootStrapped = new AtomicBoolean(false); - new Thread( + new HadoopThread( new Runnable() { @Override public void run() { @@ -431,7 +432,7 @@ public Boolean get() { // A very low DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY value should // cause timeout bootStrapped.set(false); - new Thread( + new HadoopThread( new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index ff6c2288b538b..57be6dddce6ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -55,6 +55,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -150,7 +151,7 @@ public void testMsyncSimple() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // this read will block until roll and tail edits happen. dfs.getFileStatus(testPath); @@ -200,7 +201,7 @@ private void testMsync(boolean autoMsync, long autoMsyncPeriodMs) dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // After msync, client should have the latest state ID from active. // Therefore, the subsequent getFileStatus call should succeed. @@ -289,7 +290,7 @@ public void testCallFromNewClient() throws Exception { (DistributedFileSystem) FileSystem.get(conf2); dfs2.getClient().getHAServiceState(); - Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { dfs2.getFileStatus(testPath); readStatus.set(1); @@ -330,7 +331,7 @@ public void testUncoordinatedCall() throws Exception { AtomicInteger readStatus = new AtomicInteger(0); // create a separate thread to make a blocking read. 
- Thread reader = new Thread(() -> { + Thread reader = new HadoopThread(() -> { try { // this read call will block until server state catches up. But due to // configuration, this will take a very long time. @@ -435,7 +436,7 @@ public void testRpcQueueTimeNumOpsMetrics() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(new Runnable() { + Thread reader = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 168273117b50f..43307daff3e4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -259,9 +260,9 @@ public void testDelegationTokenDuringNNFailover() throws Exception { HAServiceState.STANDBY.toString(), e); } - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { cluster.transitionToActive(1); } catch (Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index 0ac9eae7f540d..e7e66490c9d73 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -68,7 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.Lists; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -140,9 +140,9 @@ public void testClientRetrySafeMode() throws Exception { .getBlockManager()); assertTrue(nn0.getNamesystem().isInStartupSafeMode()); LOG.info("enter safemode"); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { boolean mkdir = fs.mkdirs(test); LOG.info("mkdir finished, result is " + mkdir); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 8f8dd59a1fbfb..38592f30eb0f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -91,6 +91,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.RetryCache.CacheEntry; import org.apache.hadoop.util.LightWeightCache; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -1309,9 +1310,9 @@ public void testClientRetryWithFailover(final AtMostOnceOp op) // set DummyRetryInvocationHandler#block to true DummyRetryInvocationHandler.block.set(true); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { op.invoke(); Object result = 
op.getResult(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 98f5c5e3102a3..c973e9f858344 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -53,6 +53,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Before; @@ -550,9 +551,9 @@ public void testReadsAllowedDuringCheckpoint() throws Exception { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); // Perform an RPC that needs to take the write lock. 
- Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { try { nns[1].getRpcServer().restoreFailedStorage("false"); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index 04a34160facdc..d361ad5215653 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -700,7 +701,7 @@ public void testOpenFileWritingAcrossSnapDeletion() throws Exception { final AtomicBoolean writerError = new AtomicBoolean(false); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch deleteLatch = new CountDownLatch(1); - Thread t = new Thread(new Runnable() { + Thread t = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java index 19fed937ebb53..a14a228e8f98c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java 
@@ -62,6 +62,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -563,11 +564,11 @@ public static void main(String[] args) throws Exception { long start = Time.now(); final int iteration = 20; - Thread[] threads = new Thread[threadCount]; + HadoopThread[] threads = new HadoopThread[threadCount]; for (int i = 0; i < threadCount; i++) { - threads[i] = new Thread() { + threads[i] = new HadoopThread() { @Override - public void run() { + public void work() { for (int i = 0; i < iteration; i++) { try { String user = getCurrentUser(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java index 598d18e9f0bb8..c47adc8afbf9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.util; import org.apache.hadoop.hdfs.server.namenode.AclFeature; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -85,13 +86,13 @@ public void testRefCountMapConcurrently() throws Exception { assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2)); } - class PutThread extends Thread { + class PutThread extends HadoopThread { private ReferenceCountMap referenceCountMap; PutThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; } @Override - public void run() { + public void work() { for (int i = 0; i < LOOP_COUNTER; i++) { 
referenceCountMap.put(aclFeature1); referenceCountMap.put(aclFeature2); @@ -99,13 +100,13 @@ public void run() { } }; - class RemoveThread extends Thread { + class RemoveThread extends HadoopThread { private ReferenceCountMap referenceCountMap; RemoveThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; } @Override - public void run() { + public void work() { for (int i = 0; i < LOOP_COUNTER; i++) { referenceCountMap.remove(aclFeature1); referenceCountMap.remove(aclFeature2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 922876c598d0e..7a8dd4d5a0529 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -57,6 +57,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -296,9 +297,9 @@ public void testRetryWhileNNStartup() throws Exception { final NamenodeProtocols rpcServer = namenode.getRpcServer(); Whitebox.setInternalState(namenode, "rpcServer", null); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { boolean result = false; FileSystem fs = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 5924a8dedcef3..769924fd39d54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -51,6 +51,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.opentest4j.TestAbortedException; @@ -325,9 +326,9 @@ public void testTwoStepWriteReadTimeout(TimeoutSource src) throws Exception { private void startSingleTemporaryRedirectResponseThread( final boolean consumeConnectionBacklog) { fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; - serverThread = new Thread() { + serverThread = new HadoopThread() { @Override - public void run() { + public void work() { Socket clientSocket = null; OutputStream out = null; InputStream in = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index 4c245136efb41..47d651c9566f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -64,6 +64,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -151,7 +152,7 @@ public void serviceStart() throws 
Exception { HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder(). setDaemon(true).setNameFormat("uber-SubtaskRunner").build()); // create and start an event handling thread - eventHandler = new Thread(new EventHandler(), "uber-EventHandler"); + eventHandler = new HadoopThread(new EventHandler(), "uber-EventHandler"); // if the job classloader is specified, set it onto the event handler as the // thread context classloader so that it can be used by the event handler // as well as the subtask runner threads diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 6ab06696c19df..72b395f11906b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -65,6 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; @@ -351,7 +352,7 @@ protected void serviceStart() throws Exception { } else if (timelineV2Client != null) { timelineV2Client.start(); } - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new HadoopThread(new Runnable() { @Override public void run() { JobHistoryEvent event = null; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index eb3583b41bc71..1663fc7e4316d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -134,6 +134,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -739,10 +740,10 @@ private class JobFinishEventHandler implements EventHandler { public void handle(JobFinishEvent event) { // Create a new thread to shutdown the AM. We should not do it in-line // to avoid blocking the dispatcher itself. 
- new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { shutDownJob(); } }.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index 13389d67efb71..c9c5e57e08cb1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.Clock; import org.slf4j.Logger; @@ -125,7 +126,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - lostTaskCheckerThread = new Thread(new PingChecker()); + lostTaskCheckerThread = new HadoopThread(new PingChecker()); lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker"); lostTaskCheckerThread.start(); super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index c72e13e648e15..82f28bf019a55 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -126,7 +127,7 @@ protected void serviceStart() throws Exception { ThreadFactory backingTf = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread thread = new Thread(r); + Thread thread = new HadoopThread(r); thread.setContextClassLoader(jobClassLoader); return thread; } @@ -136,7 +137,7 @@ public Thread newThread(Runnable r) { ThreadFactory tf = tfBuilder.build(); launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new HadoopThread(new Runnable() { @Override public void run() { CommitterEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index d184d9be64bf8..0724fed2561be 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -43,6 +43,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -285,9 +286,9 @@ protected void serviceStart() throws Exception { Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new Thread() { + eventHandlingThread = new HadoopThread() { @Override - public void run() { + public void work() { ContainerLauncherEvent event = null; Set allNodes = new HashSet(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index b836120a8dcb4..d94793150daea 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; @@ -300,7 +301,7 @@ public void run() { } protected void startAllocatorThread() { - allocatorThread = new Thread(new AllocatorRunnable()); + allocatorThread = new HadoopThread(new AllocatorRunnable()); allocatorThread.setName("RMCommunicator Allocator"); allocatorThread.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index cf2f90ff1e563..ae1ad1a7eca8b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -62,6 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -111,7 +112,7 @@ public class RMContainerAllocator extends RMContainerRequestor public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted " + "to make room for pending map attempts"; - private Thread eventHandlingThread; + private HadoopThread eventHandlingThread; private final AtomicBoolean stopped; static { @@ -246,10 +247,10 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws 
Exception { - this.eventHandlingThread = new Thread() { + this.eventHandlingThread = new HadoopThread() { @SuppressWarnings("unchecked") @Override - public void run() { + public void work() { ContainerAllocatorEvent event; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 800ff1809704e..2a63dd876e595 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -219,7 +220,7 @@ public void run() { } } }; - speculationBackgroundThread = new Thread + speculationBackgroundThread = new HadoopThread (speculationBackgroundCore, "DefaultSpeculator background processing"); speculationBackgroundThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 34f4c8c7164cf..717783964d805 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -137,7 +138,7 @@ public void handle(ContainerAllocatorEvent event) { } @Override protected void serviceStart() throws Exception { - thread = new Thread(new Runnable() { + thread = new HadoopThread(new Runnable() { @Override @SuppressWarnings("unchecked") public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index eef1a4a10835f..75b44642fa087 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -48,6 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -237,7 +238,7 @@ protected void unregister() { @Override protected void startAllocatorThread() { - allocatorThread = new Thread(); + allocatorThread = new HadoopThread(); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index aae1fd0b673f6..fe4ca80b8c722 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -72,6 +72,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +115,7 @@ public ProtocolSignature getProtocolSignature(String protocol, this, protocol, clientVersion, clientMethodsHash); } - private class Job extends Thread implements TaskUmbilicalProtocol { + private class Job extends HadoopThread implements TaskUmbilicalProtocol { // The job directory on the system: JobClient places job configurations here. // This is analogous to JobTracker's system directory. 
private Path systemJobDir; @@ -521,7 +522,7 @@ private void runTasks(List runnables, } @Override - public void run() { + public void work() { JobID jobId = profile.getJobID(); JobContext jContext = new JobContextImpl(job, jobId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java index a40a40ada02d9..abbce2eda75bd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.HadoopThread; class CleanupQueue { @@ -100,7 +101,7 @@ protected boolean isQueueEmpty() { return (cleanupThread.queue.size() == 0); } - private static class PathCleanupThread extends Thread { + private static class PathCleanupThread extends HadoopThread { // cleanup queue which deletes files/directories of the paths queued up. 
private LinkedBlockingQueue queue = @@ -120,7 +121,7 @@ void addToQueue(PathDeletionContext[] contexts) { } } - public void run() { + public void work() { if (LOG.isDebugEnabled()) { LOG.debug(getName() + " started."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 4f86f912838fa..5ebe61df6e967 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -74,6 +74,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1549,10 +1550,10 @@ public void flush() throws IOException, ClassNotFoundException, public void close() { } - protected class SpillThread extends Thread { + protected class SpillThread extends HadoopThread { @Override - public void run() { + public void work() { spillLock.lock(); spillThreadRunning = true; try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 6861f1b2cd36d..7820f49184d88 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -72,6 +72,7 @@ import 
org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -952,7 +953,7 @@ void resetDoneFlag() { } public void startCommunicationThread() { if (pingThread == null) { - pingThread = new Thread(this, "communication thread"); + pingThread = new HadoopThread(this, "communication thread"); pingThread.setDaemon(true); pingThread.start(); } @@ -963,7 +964,7 @@ public void startDiskLimitCheckerThreadIfNeeded() { MRJobConfig.JOB_SINGLE_DISK_LIMIT_BYTES, MRJobConfig.DEFAULT_JOB_SINGLE_DISK_LIMIT_BYTES) >= 0) { try { - diskLimitCheckThread = new Thread(new DiskLimitCheck(conf), + diskLimitCheckThread = new HadoopThread(new DiskLimitCheck(conf), "disk limit check thread"); diskLimitCheckThread.setDaemon(true); diskLimitCheckThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java index 15b0961e57deb..a07dc0f1c394d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java @@ -57,6 +57,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -280,7 +281,7 @@ public static String createDigest(byte[] password, String data) } @VisibleForTesting - public static class PingSocketCleaner extends Thread 
{ + public static class PingSocketCleaner extends HadoopThread { private final ServerSocket serverSocket; private final int soTimeout; @@ -291,7 +292,7 @@ public static class PingSocketCleaner extends Thread { } @Override - public void run() { + public void work() { LOG.info("PingSocketCleaner started..."); while (!Thread.currentThread().isInterrupted()) { Socket clientSocket = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java index 89c594a89b034..6b9530ba41261 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,7 +96,7 @@ private enum MessageType { START(0), private static class UplinkReaderThread - extends Thread { + extends HadoopThread { private DataInputStream inStream; private UpwardProtocol handler; @@ -117,7 +118,7 @@ public void closeConnection() throws IOException { inStream.close(); } - public void run() { + public void work() { while (true) { try { if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java index 
803ece7480c0d..3e51bac25b11a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * The Chain class provides all the common functionality for the @@ -296,7 +297,7 @@ private synchronized boolean setIfUnsetThrowable(Throwable th) { return false; } - private class MapRunner extends Thread { + private class MapRunner extends HadoopThread { private Mapper mapper; private Mapper.Context chainContext; private RecordReader rr; @@ -313,7 +314,7 @@ public MapRunner(Mapper mapper, } @Override - public void run() { + public void work() { if (getThrowable() != null) { return; } @@ -329,7 +330,7 @@ public void run() { } } - private class ReduceRunner extends Thread { + private class ReduceRunner extends HadoopThread { private Reducer reducer; private Reducer.Context chainContext; private RecordWriter rw; @@ -344,7 +345,7 @@ private class ReduceRunner extends Thread { } @Override - public void run() { + public void work() { try { reducer.run(chainContext); rw.close(chainContext); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java index 382ed959f12a0..c5b66dbd333e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.lib.map; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -247,7 +248,7 @@ public float getProgress() { } } - private class MapRunner extends Thread { + private class MapRunner extends HadoopThread { private Mapper mapper; private Context subcontext; private Throwable throwable; @@ -269,7 +270,7 @@ private class MapRunner extends Thread { } @Override - public void run() { + public void work() { try { mapper.run(subcontext); reader.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java index 0e28c3b41c02e..6fac8ff401714 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java @@ -23,10 +23,11 @@ import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class EventFetcher extends Thread { +class EventFetcher extends HadoopThread { private static final long SLEEP_TIME = 1000; private static final int MAX_RETRIES = 
10; private static final int RETRY_PERIOD = 5000; @@ -56,7 +57,7 @@ public EventFetcher(TaskAttemptID reduce, } @Override - public void run() { + public void work() { int failures = 0; LOG.info(reduce + " Thread started: " + getName()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 59ef95bdd462b..03a4569d40672 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,7 +56,7 @@ import org.apache.hadoop.classification.VisibleForTesting; @VisibleForTesting -public class Fetcher extends Thread { +public class Fetcher extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(Fetcher.class); @@ -187,7 +188,7 @@ public Fetcher(JobConf job, TaskAttemptID reduceId, } } - public void run() { + public void work() { try { while (!stopped && !Thread.currentThread().isInterrupted()) { MapHost host = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index 
dc563eeab4d0f..9ad5db1a0ba4d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -71,7 +71,7 @@ public LocalFetcher(JobConf job, TaskAttemptID reduceId, setDaemon(true); } - public void run() { + public void work() { // Create a worklist of task attempts to work over. Set maps = new HashSet(); for (TaskAttemptID map : localMapFiles.keySet()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java index c617569da33e8..1022b574f27df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java @@ -26,10 +26,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -abstract class MergeThread extends Thread { +abstract class MergeThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(MergeThread.class); @@ -78,7 +79,7 @@ public synchronized void waitForMerge() throws InterruptedException { } } - public void run() { + public void work() { while (true) { List inputs = null; try { diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java index 173cd093e9f6e..be379cba18116 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java @@ -49,6 +49,7 @@ import org.apache.hadoop.mapreduce.task.reduce.MapHost.State; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -554,13 +555,13 @@ public int compareTo(Delayed o) { /** * A thread that takes hosts off of the penalty list when the timer expires. 
*/ - private class Referee extends Thread { + private class Referee extends HadoopThread { public Referee() { setName("ShufflePenaltyReferee"); setDaemon(true); } - public void run() { + public void work() { try { while (true) { // take the first host that has an expired penalty diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java index 585a21d568231..7703fca7e61b5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -326,7 +327,7 @@ public static boolean isProcessGroupAlive(String pgrpId) { /** * Helper thread class that kills process-tree with SIGKILL in background */ - static class SigKillThread extends Thread { + static class SigKillThread extends HadoopThread { private String pid = null; private boolean isProcessGroup = false; @@ -339,7 +340,7 @@ private SigKillThread(String pid, boolean isProcessGroup, long interval) { sleepTimeBeforeSigKill = interval; } - public void run() { + public void work() { sigKillInCurrentThread(pid, isProcessGroup, sleepTimeBeforeSigKill); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java index e624b0304f166..d2f1a5fc69a5a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.mapreduce.MRJobConfig; import org.junit.jupiter.api.BeforeEach; @@ -221,9 +222,9 @@ public void testRemoveMap() throws Exception { // run multiple times for (int i = 0; i < 20; ++i) { - Thread getInfoThread = new Thread() { + Thread getInfoThread = new HadoopThread() { @Override - public void run() { + public void work() { try { cache.getIndexInformation("bigIndex", partsPerMap, big, user); } catch (Exception e) { @@ -231,9 +232,9 @@ public void run() { } } }; - Thread removeMapThread = new Thread() { + Thread removeMapThread = new HadoopThread() { @Override - public void run() { + public void work() { cache.removeMap("bigIndex"); } }; @@ -266,9 +267,9 @@ public void testCreateRace() throws Exception { // run multiple instances Thread[] getInfoThreads = new Thread[50]; for (int i = 0; i < 50; i++) { - getInfoThreads[i] = new Thread() { + getInfoThreads[i] = new HadoopThread() { @Override - public void run() { + public void work() { try { cache.getIndexInformation("racyIndex", partsPerMap, racy, user); cache.removeMap("racyIndex"); @@ -285,9 +286,9 @@ public void run() { final Thread mainTestThread = Thread.currentThread(); - Thread timeoutThread = new Thread() { + Thread timeoutThread = new HadoopThread() { @Override - public void run() { + public void work() { try { 
Thread.sleep(15000); mainTestThread.interrupt(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java index 0d24cbc323a86..1b2aabaf72b74 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -77,9 +78,9 @@ public boolean accept(Path path) { } }, true); - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { try { fetcher.getFileStatuses(); } catch (Exception e) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java index ef43beaa5f797..46aef150978e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import 
org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -252,7 +253,7 @@ public void uncaughtException(Thread th, Throwable ex) { task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); reporter.startDiskLimitCheckerThreadIfNeeded(); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); t.start(); @@ -273,7 +274,7 @@ public void testTaskProgress() throws Exception { Task task = new DummyTask(); task.setConf(job); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.start(); Thread.sleep(2100); task.setTaskDone(); @@ -328,7 +329,7 @@ public void uncaughtException(Thread th, Throwable ex) { Task task = new DummyTask(); task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new HadoopThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java index db7d6a980edb7..510a3469c9a63 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java @@ -41,6 +41,7 @@ import 
org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -184,9 +185,9 @@ public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() dfsCluster.getFileSystem().setSafeMode( SafeModeAction.ENTER); assertTrue(dfsCluster.getFileSystem().isInSafeMode()); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); dfsCluster.getFileSystem().setSafeMode( @@ -209,9 +210,9 @@ public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() assertTrue(dfsCluster.getFileSystem().isInSafeMode()); final ControlledClock clock = new ControlledClock(); clock.setTime(1); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); clock.setTime(3000); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java index b3b66e560d940..68d4ba48cd950 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java @@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.service.Service; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.EventHandler; import org.junit.jupiter.api.Test; @@ -254,7 +255,7 @@ protected EventHandler createJobHistoryHandler( @Override protected void serviceStart() { // Don't start any event draining thread. - super.eventHandlingThread = new Thread(); + super.eventHandlingThread = new HadoopThread(); super.eventHandlingThread.start(); } }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java index 2a7a9f3c80bef..127e33b8cd68e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.junit.jupiter.api.AfterAll; @@ -106,7 +107,7 @@ public void testTwoThreadsQueryingDifferentJobOfSameUser() * files in one child thread. */ createJhistFile(job1); - webRequest1 = new Thread( + webRequest1 = new HadoopThread( new Runnable() { @Override public void run() { @@ -136,7 +137,7 @@ public void run() { * will also see the job history files for job1. 
*/ createJhistFile(job2); - webRequest2 = new Thread( + webRequest2 = new HadoopThread( new Runnable() { @Override public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java index 33a60681a35bf..91be7e6b9dbd4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Fails the Mapper. First attempt throws exception. Rest do System.exit. @@ -33,9 +34,9 @@ public void map(Text key, Text value, // Just create a non-daemon thread which hangs forever. MR AM should not be // hung by this. 
- new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { synchronized (this) { try { wait(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java index 125dad5cbe14d..f1f28487e1aef 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java @@ -44,6 +44,7 @@ import org.apache.hadoop.mapred.*; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -178,7 +179,7 @@ enum StatSeries { public String toString() {return statName;} } - private static class FileCreateDaemon extends Thread { + private static class FileCreateDaemon extends HadoopThread { private static final int NUM_CREATE_THREADS = 10; private static volatile int numFinishedThreads; private static volatile int numRunningThreads; @@ -194,7 +195,7 @@ private static class FileCreateDaemon extends Thread { this.end = end; } - public void run() { + public void work() { try { for(int i=start; i < end; i++) { String name = getFileName(i); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java index 044c77c0853dd..83d84568598f9 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java @@ -48,6 +48,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -308,7 +309,7 @@ public void configure(JobConf job) { getArgsFromConfiguration(jobConf); } - private class ProgressThread extends Thread { + private class ProgressThread extends HadoopThread { boolean keepGoing; // while this is true, thread runs. private Reporter reporter; @@ -318,7 +319,7 @@ public ProgressThread(final Reporter r) { this.keepGoing = true; } - public void run() { + public void work() { while (keepGoing) { if (!ProgressThread.interrupted()) { try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java index 303857bf70e0b..f956dfe89a781 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java @@ -39,6 +39,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -200,8 +201,8 @@ private void 
checkJobExitStatus(int status, String jobName) { private void runTest(final JobClient jc, final Configuration conf, final String jobClass, final String[] args, KillTaskThread killTaskThread, KillTrackerThread killTrackerThread) throws Exception { - Thread t = new Thread("Job Test") { - public void run() { + HadoopThread t = new HadoopThread("Job Test") { + public void work() { try { Class jobClassObj = conf.getClassByName(jobClass); int status = ToolRunner.run(conf, (Tool)(jobClassObj.newInstance()), @@ -249,7 +250,7 @@ public void run() { t.join(); } - private class KillTrackerThread extends Thread { + private class KillTrackerThread extends HadoopThread { private volatile boolean killed = false; private JobClient jc; private RunningJob rJob; @@ -281,7 +282,7 @@ public void setRunningJob(RunningJob rJob) { public void kill() { killed = true; } - public void run() { + public void work() { stopStartTrackers(true); if (!onlyMapsProgress) { stopStartTrackers(false); @@ -392,7 +393,7 @@ private String convertTrackerNameToHostName(String trackerName) { } - private class KillTaskThread extends Thread { + private class KillTaskThread extends HadoopThread { private volatile boolean killed = false; private RunningJob rJob; @@ -416,7 +417,7 @@ public void setRunningJob(RunningJob rJob) { public void kill() { killed = true; } - public void run() { + public void work() { killBasedOnProgress(true); if (!onlyMapsProgress) { killBasedOnProgress(false); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java index 83ea506bcd073..43c692c7dd6ea 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java @@ -21,6 +21,7 @@ import org.apache.hadoop.io.*; import org.apache.hadoop.mapred.UtilsForTests.RandomInputFormat; import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import java.io.*; @@ -51,14 +52,14 @@ public void map(Text key, Text val, final OutputCollector out, Reporter reporter) throws IOException { // Class for calling collect in separate threads - class CollectFeeder extends Thread { + class CollectFeeder extends HadoopThread { int id; // id for the thread public CollectFeeder(int id) { this.id = id; } - public void run() { + public void work() { for (int j = 1; j <= NUM_COLLECTS_PER_THREAD; j++) { try { out.collect(new IntWritable((id * NUM_COLLECTS_PER_THREAD) + j), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java index 7f26bb33e8179..2144d275aae6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -110,7 +111,7 @@ public static void doJobControlTest() throws Exception { theControl.addJob(job_3); 
theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new HadoopThread(theControl); theController.start(); while (!theControl.allFinished()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java index 92a5868a56b9a..efa4c6d325670 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,7 +116,7 @@ public void testLocalJobControlDataCopy() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new HadoopThread(theControl); theController.start(); while (!theControl.allFinished()) { LOG.debug("Jobs in waiting state: " + theControl.getWaitingJobs().size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java index 46ac5cacae62a..311b9af7c3770 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java @@ -515,8 +515,8 @@ private static class SocketCleaner extends PingSocketCleaner { } @Override - public void run() { - super.run(); + public void work() { + super.work(); } protected void closeSocketInternal(Socket clientSocket) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java index d97ea5c8f7ae9..da4222e08269a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -317,8 +318,8 @@ public void testMultiMaps() throws Exception { FileOutputFormat.setOutputPath(job, outputPath); final Thread toInterrupt = Thread.currentThread(); - Thread interrupter = new Thread() { - public void run() { + HadoopThread interrupter = new HadoopThread() { + public void work() { try { Thread.sleep(120*1000); // 2m toInterrupt.interrupt(); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java index 1bae2b0fe2c73..f9ef635183521 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java @@ -28,6 +28,7 @@ import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MapReduceTestUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -112,7 +113,7 @@ private JobControl createDependencies(Configuration conf, Job job1) theControl.addJob(cjob2); theControl.addJob(cjob3); theControl.addJob(cjob4); - Thread theController = new Thread(theControl); + Thread theController = new HadoopThread(theControl); theController.start(); return theControl; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java index 0b423797e5c82..4b48e50a876ed 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; /** @@ -148,7 +149,7 @@ private ControlledJob createFailedControlledJob(JobControl jobControl, } private void runJobControl(JobControl jobControl) { - Thread controller = new Thread(jobControl); + Thread controller = new HadoopThread(jobControl); controller.start(); waitTillAllFinished(jobControl); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index b3533482b525d..a82d07fc844e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -45,6 +45,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.MiniYARNCluster; @@ -266,8 +267,8 @@ public synchronized void serviceStart() throws Exception { } historyServer = new JobHistoryServer(); historyServer.init(getConfig()); - new Thread() { - public void run() { + new HadoopThread() { + public void work() { historyServer.start(); }; 
}.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java index 6a437b123c8ba..33a0088454fb5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java @@ -22,6 +22,7 @@ import org.apache.hadoop.mapred.Task.TaskReporter; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +86,7 @@ public synchronized void start() { // init counters used by native side, // so they will have correct display name initUsedCounters(); - checker = new Thread(this); + checker = new HadoopThread(this); checker.setDaemon(true); checker.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java index 3ce6936c3d7dc..8d8ee453aa58e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java @@ -42,6 +42,7 @@ import org.apache.hadoop.util.IndexedSortable; import org.apache.hadoop.util.QuickSort; import org.apache.hadoop.util.StringUtils; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.functional.FutureIO; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY; @@ -145,11 +146,11 @@ public static void writePartitionFile(final JobContext job, for(int i=0; i < samples; ++i) { final int idx = i; samplerReader[i] = - new Thread (threadGroup,"Sampler Reader " + idx) { + new HadoopThread (threadGroup, "Sampler Reader " + idx) { { setDaemon(true); } - public void run() { + public void work() { long records = 0; try { TaskAttemptContext context = new TaskAttemptContextImpl( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java index 12a1cd7d8f63e..18bb0944792ab 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticsContext; import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextImpl; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.functional.CloseableTaskPoolSubmitter; import org.apache.hadoop.util.functional.TaskPool; @@ -457,7 +458,7 @@ public void testListingThroughTaskPool() throws Throwable { * If constructed with an IOStatisticsContext then * that context is switched to before performing the IO. 
*/ - private class TestWorkerThread extends Thread implements Runnable { + private class TestWorkerThread extends HadoopThread implements Runnable { private final Path workerThreadPath; private final IOStatisticsContext ioStatisticsContext; @@ -475,7 +476,7 @@ private class TestWorkerThread extends Thread implements Runnable { } @Override - public void run() { + public void work() { // Setting the worker thread's name. Thread.currentThread().setName("worker thread"); S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java index 96c59c266a647..7a147df00e3e3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java @@ -37,7 +37,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.test.tags.ScaleTest; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -135,7 +135,7 @@ concurrentRenames, new ThreadFactory() { private AtomicInteger count = new AtomicInteger(0); public Thread newThread(Runnable r) { - return new Thread(r, + return new HadoopThread(r, "testParallelRename" + count.getAndIncrement()); } }); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java index 7cd1821c3da50..b3c9c800470fa 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java +++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java @@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -256,7 +257,7 @@ public AzureFileSystemThreadFactory(String prefix) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); // Use current thread name as part in naming thread such that use of // same file system object will have unique names. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 4c8d5fb6a5f71..553ee4a969eb9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; @@ -821,7 +822,7 @@ class UploaderThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName(String.format("%s-%d", THREAD_ID_PREFIX, threadSequenceNumber.getAndIncrement())); return t; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 989c3ba6d9340..19c67006c3cbf 100644 --- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azure; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.VisibleForTesting; import com.microsoft.azure.storage.AccessCondition; @@ -105,7 +105,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper, boolean throwIfPresent) } } } - renewer = new Thread(new Renewer()); + renewer = new HadoopThread(new Renewer()); // A Renewer running should not keep JVM from exiting, so make it a daemon. renewer.setDaemon(true); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java index d3fe4aefeb050..721ce68e837ff 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java @@ -22,6 +22,7 @@ import java.util.Date; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Internal implementation class to help calculate the current bytes @@ -67,7 +68,7 @@ public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation, this.windowSizeMs = windowSizeMs; this.instrumentation = instrumentation; if (!manualUpdateTrigger) { - uploadBandwidthUpdater = new Thread(new UploadBandwidthUpdater(), THREAD_NAME); + uploadBandwidthUpdater = new HadoopThread(new UploadBandwidthUpdater(), THREAD_NAME); uploadBandwidthUpdater.setDaemon(true); uploadBandwidthUpdater.start(); } diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java index 74f5aa4ffb573..56f2a2ba9c6ef 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_LIST_MAX_RESULTS; @@ -151,7 +152,7 @@ public boolean listRecursiveAndTakeAction() Thread producerThread = null; try { ListBlobQueue listBlobQueue = createListBlobQueue(configuration); - producerThread = new Thread(() -> { + producerThread = new HadoopThread(() -> { try { produceConsumableList(listBlobQueue); } catch (AzureBlobFileSystemException e) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java index fe1ac3fa1f235..190d54dce9e5f 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java @@ -27,6 +27,7 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.classification.VisibleForTesting; /** 
@@ -92,7 +93,7 @@ void init() { getFreeList().add(i); } for (int i = 0; i < NUM_THREADS; i++) { - Thread t = new Thread(new ReadBufferWorker(i, this)); + Thread t = new HadoopThread(new ReadBufferWorker(i, this)); t.setDaemon(true); threads[i] = t; t.setName("ABFS-prefetch-" + i); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java index 9cce860127dae..b15ea54646a63 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.util.concurrent.HadoopThread; final class ReadBufferManagerV2 extends ReadBufferManager { @@ -213,7 +214,7 @@ public void testMimicFullUseAndAddFailedBuffer(final ReadBuffer buf) { private int count = 0; @Override public Thread newThread(Runnable r) { - return new Thread(r, "ReadAheadV2-Thread-" + count++); + return new HadoopThread(r, "ReadAheadV2-Thread-" + count++); } }; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java index f28a15fd7149f..3fa9f49edd2db 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.azure.integration.AzureTestUtils; import org.apache.hadoop.fs.permission.FsPermission; 
import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Handle OOB IO into a shared container. @@ -74,7 +75,7 @@ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) { * Start writing blocks to Azure storage. */ public void startWriting() { - runner = new Thread(this); // Create the block writer thread. + runner = new HadoopThread(this); // Create the block writer thread. runner.start(); // Start the block writer thread. } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java index ab175ba6c5c15..a1fcf43972f97 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*; @@ -94,7 +95,7 @@ public void testMultiThreadedBlockBlobReadScenario() throws Throwable { Path testFilePath1 = new Path(base, "test1.dat"); Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new HadoopThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -121,7 +122,7 @@ public void testMultiThreadBlockBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new 
HadoopThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -142,7 +143,7 @@ public void testMultiThreadedPageBlobSetPermissionScenario() createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -161,7 +162,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -179,7 +180,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() public void testMultiThreadedPageBlobOpenScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { inputStream = fs.open(testPath); @@ -200,7 +201,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { @@ -219,7 +220,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); 
@@ -237,7 +238,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); @@ -253,7 +254,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable { public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createTestFolder(createTestAccount(), testFolderPath); - Thread t = new Thread(new DeleteThread(fs, testFolderPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -271,7 +272,7 @@ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable { createTestFolder( getPageBlobTestStorageAccount(), testFolderPath); - Thread t = new Thread(new DeleteThread(fs, testFolderPath)); + Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -293,7 +294,7 @@ public void testMultiThreadedPageBlobReadScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new HadoopThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -318,7 +319,7 @@ public void testMultiThreadedPageBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new HadoopThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java index 918866a73e5d7..261ba5b57512e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -67,7 +68,7 @@ public void testMultipleRenameFileOperationsToSameDestination() for (int i = 0; i < 10; i++) { final int threadNumber = i; Path src = path("test" + threadNumber); - threads.add(new Thread(() -> { + threads.add(new HadoopThread(() -> { try { latch.await(Long.MAX_VALUE, TimeUnit.SECONDS); } catch (InterruptedException e) { @@ -155,9 +156,9 @@ public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() // Acquire the lease on the file in a background thread final CountDownLatch leaseAttemptComplete = new CountDownLatch(1); final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1); - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { // Acquire the lease and then signal the main test thread. 
SelfRenewingLease lease = null; try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java index ecf6e17b82aa7..415a612f4bcb1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; @@ -1643,9 +1644,9 @@ public void testLeaseAsDistributedLock() throws IllegalArgumentException, NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs; String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(LEASE_LOCK_FILE_KEY))); - Thread first = new Thread(new LeaseLockAction("first-thread", fullKey)); + Thread first = new HadoopThread(new LeaseLockAction("first-thread", fullKey)); first.start(); - Thread second = new Thread(new LeaseLockAction("second-thread", fullKey)); + Thread second = new HadoopThread(new LeaseLockAction("second-thread", fullKey)); second.start(); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java index f801f5e9ddae9..8e2feeb64448f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java 
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -149,7 +150,7 @@ public void testMultiThreadedOperation() throws Exception { final ConcurrentLinkedQueue exceptionsEncountered = new ConcurrentLinkedQueue(); for (int i = 0; i < numThreads; i++) { final Path threadLocalFile = new Path("/myFile" + i); - threads[i] = new Thread(new Runnable() { + threads[i] = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java index 187aa02cceb93..efff0c79c6b62 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java @@ -24,6 +24,7 @@ import java.util.Date; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; public class TestBandwidthGaugeUpdater { @@ -56,7 +57,7 @@ public void testMultiThreaded() throws Exception { new BandwidthGaugeUpdater(instrumentation, 1000, true); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(new Runnable() { + threads[i] = new HadoopThread(new Runnable() { @Override public void run() { updater.blockDownloaded(new Date(), new Date(), 10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 0f7e6d9009b8a..3be5c555eb24d 
100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -63,6 +63,7 @@ import org.apache.hadoop.fs.store.BlockUploadStatistics; import org.apache.hadoop.fs.store.DataBlocks; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; import static java.net.HttpURLConnection.HTTP_OK; @@ -1243,7 +1244,7 @@ public void testFlushSuccessWithConnectionResetOnResponseInvalidMd5() throws Exc out1.write(bytes1); //parallel flush call should lead to the first call failing because of md5 mismatch. - Thread parallelFlushThread = new Thread(() -> { + Thread parallelFlushThread = new HadoopThread(() -> { try { out1.hsync(); } catch (IOException e) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index e063f71e8c2da..cb87293eb6784 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -69,6 +69,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticAssertions; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.functional.FunctionRaisingIOE; import static java.net.HttpURLConnection.HTTP_CLIENT_TIMEOUT; @@ -1011,7 +1012,7 @@ public void testParallelRenameForAtomicRenameShouldFail() throws Exception { .acquireLease(Mockito.anyString(), Mockito.anyInt(), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); 
- new Thread(() -> { + new HadoopThread(() -> { while (!leaseAcquired.get()) {} try { fs.rename(src, dst); @@ -1061,7 +1062,7 @@ public void testAppendAtomicBlobDuringRename() throws Exception { return answer.callRealMethod(); }).when(client).copyBlob(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); - new Thread(() -> { + new HadoopThread(() -> { while (!copyInProgress.get()) {} try { os.write(1); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java index 6b6596c38d821..ba6141fa6a032 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -367,7 +368,7 @@ private List readLines(File file) throws IOException { return lines; } - private static final class StreamPrinter extends Thread { + private static final class StreamPrinter extends HadoopThread { private final InputStream in; private final List lines; @@ -377,7 +378,7 @@ private StreamPrinter(InputStream in) { } @Override - public void run() { + public void work() { try (BufferedReader br = new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8))) { String line = br.readLine(); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java index 9c461cb18fb8a..ed22fc7e875a0 100644 --- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.tools.util.WorkReport; import org.apache.hadoop.tools.util.WorkRequest; import org.apache.hadoop.tools.util.WorkRequestProcessor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -146,8 +147,8 @@ public void testMultipleProducerConsumerShutdown() // starts two thread: a source thread which put in work, and a sink thread // which takes a piece of work from ProducerConsumer - class SourceThread extends Thread { - public void run() { + class SourceThread extends HadoopThread { + public void work() { while (true) { try { worker.put(new WorkRequest(42)); @@ -161,8 +162,8 @@ public void run() { // The source thread put requests into producer-consumer. SourceThread source = new SourceThread(); source.start(); - class SinkThread extends Thread { - public void run() { + class SinkThread extends HadoopThread { + public void work() { try { while (true) { WorkReport report = worker.take(); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java index e44f811f0db41..63b376fd317bd 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java @@ -19,6 +19,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import java.io.IOException; import java.nio.ByteBuffer; @@ -549,7 +550,7 @@ public void onContainersAllocated(List allocatedContainers) { + container.getNodeHttpAddress() + ", containerResourceMemory=" + rsrc.getMemorySize() + ", containerResourceVirtualCores=" + rsrc.getVirtualCores()); - Thread launchThread = new Thread(containerLauncher); + Thread launchThread = new HadoopThread(containerLauncher); // launch and start the container on a separate thread to keep // the main thread unblocked diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java index 0c57542747e41..d969a4908d5b3 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java @@ -77,6 +77,7 @@ import org.apache.hadoop.util.ClassUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; @@ -891,7 +892,7 @@ private boolean monitorInfraApplication() throws YarnException, IOException { boolean loggedApplicationInfo = false; boolean success = false; - Thread namenodeMonitoringThread = new Thread(() -> { + Thread namenodeMonitoringThread = new HadoopThread(() -> { Supplier exitCritera = () -> Apps.isApplicationFinalState(infraAppState); Optional namenodeProperties = Optional.empty(); diff --git 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java index f6c8a6ac4d58b..17742bbc6db7b 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java @@ -52,6 +52,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -319,7 +320,7 @@ static void waitForNameNodeReadiness(final Properties nameNodeProperties, .get(getNameNodeHdfsUri(nameNodeProperties), conf); log.info("Launching thread to trigger block reports for Datanodes with <" + blockThreshold + " blocks reported"); - Thread blockReportThread = new Thread(() -> { + Thread blockReportThread = new HadoopThread(() -> { // Here we count both Missing and UnderReplicated within under // replicated long lastUnderRepBlocks = Long.MAX_VALUE; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index db34037da7806..19dcdd08f8fd6 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -57,6 +57,7 @@ 
import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -461,7 +462,7 @@ private Client createAndStartClient(Configuration localConf) { final Client client = new Client(JarFinder.getJar(ApplicationMaster.class), JarFinder.getJar(Assertions.class)); client.setConf(localConf); - Thread appThread = new Thread(() -> { + Thread appThread = new HadoopThread(() -> { try { client.run(new String[] {"-" + Client.MASTER_MEMORY_MB_ARG, "128", "-" + Client.CONF_PATH_ARG, confZip.toString(), diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java index 14e8c9cb82f16..25bc2445f614a 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java @@ -43,6 +43,7 @@ import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.REPLAYCOUNTERS; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.ReplayCommand; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +58,7 @@ * are inserted by the {@link AuditReplayMapper}. Once an item is ready, this * thread will fetch the command from the queue and attempt to replay it. 
*/ -public class AuditReplayThread extends Thread { +public class AuditReplayThread extends HadoopThread { private static final Logger LOG = LoggerFactory.getLogger(AuditReplayThread.class); @@ -154,7 +155,7 @@ Exception getException() { } @Override - public void run() { + public void work() { long currentEpoch = System.currentTimeMillis(); long delay = startTimestampMs - currentEpoch; try { diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index 4d4e9a26b3de4..bf9a4c27916ee 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -36,6 +36,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -309,9 +310,9 @@ public void setJournal(BalanceJournal journal) { /** * This thread consumes the delayQueue and move the jobs to the runningQueue. */ - class Rooster extends Thread { + class Rooster extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { try { DelayWrapper dJob = delayQueue.take(); @@ -327,9 +328,9 @@ public void run() { /** * This thread consumes the runningQueue and give the job to the workers. 
*/ - class Reader extends Thread { + class Reader extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { try { final BalanceJob job = runningQueue.poll(500, TimeUnit.MILLISECONDS); @@ -361,9 +362,9 @@ public void run() { * This thread consumes the recoverQueue, recovers the job the adds it to the * runningQueue. */ - class Recover extends Thread { + class Recover extends HadoopThread { @Override - public void run() { + public void work() { while (running.get()) { BalanceJob job = null; try { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index e476223cf1e23..ce2778a6cd7a9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -43,6 +43,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.ZombieJobProducer; import org.slf4j.Logger; @@ -627,7 +628,7 @@ private int setupDistCacheEmulation(Configuration conf, String traceIn, * pipeline abort its progress, waiting for each to exit and killing * any jobs still running on the cluster. 
*/ - class Shutdown extends Thread { + class Shutdown extends HadoopThread { static final long FAC_SLEEP = 1000; static final long SUB_SLEEP = 4000; @@ -647,7 +648,7 @@ private void killComponent(Component component, long maxwait) { } @Override - public void run() { + public void work() { LOG.info("Exiting..."); try { killComponent(factory, FAC_SLEEP); // read no more tasks diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java index 0b06911be0857..5944d08dfd7f7 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java @@ -35,6 +35,7 @@ import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobStatus; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Component accepting submitted, running {@link Statistics.JobStats} and @@ -133,14 +134,14 @@ List getRemainingJobs() { * Monitoring thread pulling running jobs from the component and into * a queue to be polled for status. 
*/ - private class MonitorThread extends Thread { + private class MonitorThread extends HadoopThread { public MonitorThread(int i) { super("GridmixJobMonitor-" + i); } @Override - public void run() { + public void work() { boolean graceful; boolean shutdown; while (true) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java index d1229ce2d8ff4..cbafa9ca9df17 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java @@ -41,6 +41,7 @@ import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.TaskInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import java.io.IOException; @@ -143,7 +144,7 @@ private void configure() { * This is a progress based resource usage matcher. 
*/ @SuppressWarnings("unchecked") - static class ResourceUsageMatcherRunner extends Thread + static class ResourceUsageMatcherRunner extends HadoopThread implements Progressive { private final ResourceUsageMatcher matcher; private final BoostingProgress progress; @@ -199,7 +200,7 @@ protected void match() throws IOException, InterruptedException { } @Override - public void run() { + public void work() { LOG.info("Resource usage matcher thread started."); try { while (progress.getProgress() < 1) { @@ -234,7 +235,7 @@ void boost(float value) { // Makes sure that the TaskTracker doesn't kill the map/reduce tasks while // they are emulating - private static class StatusReporter extends Thread { + private static class StatusReporter extends HadoopThread { private final TaskAttemptContext context; private final Progressive progress; @@ -244,7 +245,7 @@ private static class StatusReporter extends Thread { } @Override - public void run() { + public void work() { LOG.info("Status reporter thread started."); try { while (!isInterrupted() && progress.getProgress() < 1) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java index fe3b5d36d9841..7c13e9e2c3665 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,14 +65,14 @@ public Thread createReaderThread() { public void update(Statistics.ClusterStats item) { } - private class 
ReplayReaderThread extends Thread { + private class ReplayReaderThread extends HadoopThread { public ReplayReaderThread(String threadName) { super(threadName); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java index cb05ab63f1c07..d5201f9384206 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -59,7 +60,7 @@ public Thread createReaderThread() { return new SerialReaderThread("SerialJobFactory"); } - private class SerialReaderThread extends Thread { + private class SerialReaderThread extends HadoopThread { public SerialReaderThread(String threadName) { super(threadName); @@ -78,7 +79,7 @@ public SerialReaderThread(String threadName) { * == */ @Override - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java index bf73f2a1faa55..4de17aee14517 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java +++ 
b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java @@ -28,6 +28,7 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -216,13 +217,13 @@ public void start() { statistics.start(); } - private class StatCollector extends Thread { + private class StatCollector extends HadoopThread { StatCollector() { super("StatsCollectorThread"); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java index 4e7fc9c2bbd80..bf5ea483e25c0 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.util.HashSet; @@ -136,7 +137,7 @@ public Thread createReaderThread() { * Worker thread responsible for reading descriptions, assigning sequence * numbers, and normalizing time. */ - private class StressReaderThread extends Thread { + private class StressReaderThread extends HadoopThread { public StressReaderThread(String name) { super(name); @@ -152,7 +153,7 @@ public StressReaderThread(String name) { * load the JT. 
* That is submit (Sigma(no of maps/Job)) > (2 * no of slots available) */ - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java index 23e1413fcad87..850e8ed9af0db 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java @@ -20,13 +20,14 @@ package org.apache.hadoop.resourceestimator.service; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Simple shutdown hook for {@link ResourceEstimatorServer}. */ -public class ShutdownHook extends Thread { +public class ShutdownHook extends HadoopThread { private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownHook.class); private final ResourceEstimatorServer server; @@ -35,7 +36,7 @@ public class ShutdownHook extends Thread { this.server = server; } - public void run() { + public void work() { try { server.shutdown(); } catch (Exception e) { diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java index 3d6541565cb44..750b5dd371e97 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java @@ -38,7 +38,7 @@ import org.apache.hadoop.streaming.io.TextOutputReader; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.ReflectionUtils; - +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.io.Text; /** Shared functionality for PipeMapper, PipeReducer. @@ -366,7 +366,7 @@ OutputReader createOutputReader(Class outputReaderClass) } - class MROutputThread extends Thread { + class MROutputThread extends HadoopThread { MROutputThread(OutputReader outReader, OutputCollector outCollector, Reporter reporter) { @@ -376,7 +376,7 @@ class MROutputThread extends Thread { this.reporter = reporter; } - public void run() { + public void work() { try { // 3/4 Tool to Hadoop while (outReader.readKeyValue()) { @@ -418,7 +418,7 @@ public void run() { } - class MRErrorThread extends Thread { + class MRErrorThread extends HadoopThread { public MRErrorThread() { this.reporterPrefix = job_.get("stream.stderr.reporter.prefix", "reporter:"); @@ -431,7 +431,7 @@ public void setReporter(Reporter reporter) { this.reporter = reporter; } - public void run() { + public void work() { Text line = new Text(); LineReader lineReader = null; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index d6ec1e4d4c516..d745d187c3b96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -69,6 +69,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -1761,7 +1762,7 @@ Thread createLaunchContainerThread(Container allocatedContainer, LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer, containerListener, shellId); - return new Thread(runnableLaunchContainer); + return new HadoopThread(runnableLaunchContainer); } private void publishContainerStartEventOnTimelineServiceV2( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java index 607a4c90d7e93..7fc42f73542f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -333,7 +334,7 @@ protected void baseTestDSShell(String methodName, boolean haveDomain, boolean de assertTrue(initSuccess); LOG.info("Running DS 
Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new Thread(() -> { + Thread t = new HadoopThread(() -> { try { result.set(dsClient.run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java index 81420465afb90..3ae65b3d0a27f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; @@ -69,7 +70,7 @@ protected Thread createLaunchContainerThread(Container allocatedContainer, threadsLaunched++; launchedContainers.add(allocatedContainer.getId()); yarnShellIds.add(shellId); - return new Thread(); + return new HadoopThread(); } void setNumTotalContainers(int numTotalContainers) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java index 6ef26ed1cce72..44018ad852e2b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -126,7 +127,7 @@ public void testDSShellWithEnforceExecutionType(TestInfo testInfo) throws Except try { setAndGetDSClient(new Configuration(getYarnClusterConfiguration())); getDSClient().init(args); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { getDSClient().run(); } catch (Exception e) { @@ -220,7 +221,7 @@ private void doTestDistributedShellWithResources( assertTrue(getDSClient().init(args)); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { result.set(getDSClient().run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java index 7ebc747ed2ea1..b04bd4ed56a78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.YarnClient; @@ -320,7 +321,7 @@ public void testDistributedShellWithAllocationTagNamespace( new Client( new Configuration(distShellTest.getYarnClusterConfiguration())); dsClient.init(argsA); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new HadoopThread(() -> { try { dsClient.run(); } catch (Exception e) { @@ -455,7 +456,7 @@ private void waitForExpectedNMsCount(int[] expectedNMCounts, /** * Monitor containers running on NMs. 
*/ - class NMContainerMonitor extends Thread { + class NMContainerMonitor extends HadoopThread { // The interval of milliseconds of sampling (500ms) private final static int SAMPLING_INTERVAL_MS = 500; @@ -465,7 +466,7 @@ class NMContainerMonitor extends Thread { private volatile boolean isRunning = true; @Override - public void run() { + public void work() { while (isRunning) { for (int i = 0; i < NUM_NMS; i++) { int nContainers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index 37b3477c8c187..30e5162c20a37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -242,9 +243,9 @@ public void launchAM(ApplicationAttemptId attemptId) // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new HadoopThread() { @Override - public void run() { + public void work() { 
try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { @@ -256,9 +257,9 @@ public void run() { } } }; - Thread outThread = new Thread() { + Thread outThread = new HadoopThread() { @Override - public void run() { + public void work() { try { String line = inReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java index f971d7140aa44..613eb8a089424 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.service.SystemServiceManager; @@ -127,7 +128,7 @@ protected void serviceStart() throws Exception { launchUserService(syncUserServices); // Create a thread and submit services in background otherwise it // block RM switch time. 
- serviceLaucher = new Thread(createRunnable()); + serviceLaucher = new HadoopThread(createRunnable()); serviceLaucher.setName("System service launcher"); serviceLaucher.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index 2a9bf8d5d975f..ebb766903c891 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -154,9 +155,9 @@ public StopResponseProto stop(StopRequestProto requestProto) // Stop the service in 2 seconds delay to make sure this rpc call is completed. // shutdown hook will be executed which will stop AM gracefully. 
- Thread thread = new Thread() { + Thread thread = new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(2000); ExitUtil.terminate(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java index 0c0a53fecd4bd..8ce29f098fff3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.service.ServiceTestUtils; import org.apache.hadoop.yarn.service.api.records.Artifact; @@ -725,9 +726,9 @@ public void testNoServiceDependencies() { @Test public void testServiceDependencies() { - Thread thread = new Thread() { + HadoopThread thread = new HadoopThread() { @Override - public void run() { + public void work() { Service service = createExampleApplication(); Component compa = createComponent("compa"); Component compb = createComponent("compb"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java index 5656484fca126..89c01f9bf8d53 
100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.eclipse.jetty.websocket.api.Session; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect; @@ -85,7 +86,7 @@ public void onClose(Session session, int status, String reason) { public void run() { try { Reader consoleReader = new Reader(); - Thread inputThread = new Thread(consoleReader, "consoleReader"); + Thread inputThread = new HadoopThread(consoleReader, "consoleReader"); inputThread.start(); while (mySession.isOpen()) { mySession.getRemote().flush(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 376c9dc1b05c1..2fda78d5de474 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.Container; @@ -293,12 +294,12 @@ public void updateTrackingUrl(String trackingUrl) { client.updateTrackingUrl(trackingUrl); } - private class HeartbeatThread extends Thread { + private class HeartbeatThread extends HadoopThread { public HeartbeatThread() { super("AMRM Heartbeater thread"); } - public void run() { + public void work() { while (true) { Object response = null; // synchronization ensures we don't send heartbeats after unregistering @@ -337,12 +338,12 @@ public void run() { } } - private class CallbackHandlerThread extends Thread { + private class CallbackHandlerThread extends HadoopThread { public CallbackHandlerThread() { super("AMRM Callback Handler Thread"); } - public void run() { + public void work() { while (true) { if (!keepRunning) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java index 4a4c50607dab7..e0e737017d777 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java @@ -59,6 +59,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,7 +74,7 @@ public class NMClientAsyncImpl extends NMClientAsync { protected ThreadPoolExecutor threadPool; protected int maxThreadPoolSize; - protected Thread eventDispatcherThread; + protected HadoopThread 
eventDispatcherThread; protected AtomicBoolean stopped = new AtomicBoolean(false); protected BlockingQueue events = new LinkedBlockingQueue(); @@ -151,9 +152,9 @@ protected void serviceStart() throws Exception { threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventDispatcherThread = new Thread() { + eventDispatcherThread = new HadoopThread() { @Override - public void run() { + public void work() { ContainerEvent event = null; Set allNodes = new HashSet(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index c16fe03b82a43..a3a536ad46391 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -63,6 +63,7 @@ import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Priority; @@ -367,9 +368,9 @@ private static class QueueMetrics { long pendingContainers; } - private class KeyboardMonitor extends Thread { + private class KeyboardMonitor extends HadoopThread { - public void run() { + public void work() { Scanner keyboard = new Scanner(System.in, "UTF-8"); while (runKeyboardMonitor.get()) { String in = keyboard.next(); @@ -1229,7 +1230,7 @@ private String getCommandOutput(String[] command) throws IOException, private void addShutdownHook() { //clear screen when the program exits - Runtime.getRuntime().addShutdownHook(new Thread(() -> { 
+ Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { clearScreen(); })); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index eff68990ea8a8..df21a87017f76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.concurrent.TimeoutException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; @@ -244,8 +245,8 @@ protected void verifyClientConnection() throws InterruptedException { } protected Thread createAndStartFailoverThread() { - Thread failoverThread = new Thread() { - public void run() { + HadoopThread failoverThread = new HadoopThread() { + public void work() { keepRunning = true; while (keepRunning) { if (cluster.getStartFailoverFlag()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java index 165569df4c736..0a1a75ab66ee2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; @@ -160,7 +161,7 @@ private void testProxyProvider(boolean facadeFlushCache) throws Exception { .getSubClusters(any(GetSubClustersInfoRequest.class)); threadResponse = null; - Thread thread = new Thread(new Runnable() { + Thread thread = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java index 38f220dea8cbe..ec99c7c505f0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java @@ -29,6 +29,7 @@ import org.apache.hadoop.service.ServiceStateChangeListener; import org.apache.hadoop.tools.GetGroupsTestBase; import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.jupiter.api.AfterAll; @@ -70,8 +71,8 @@ public void stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(conf); - new Thread() { - public void run() { + new 
HadoopThread() { + public void work() { resourceManager.start(); }; }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java index f0aedf622ec76..2aabe51250eab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; @@ -107,8 +108,8 @@ private void validateActiveRM(YarnClient client) throws IOException { } private void makeRMActive(final MiniYARNCluster cluster, final int index) { - Thread t = new Thread() { - @Override public void run() { + HadoopThread t = new HadoopThread() { + @Override public void work() { try { System.out.println("Transition rm" + index + " to active"); cluster.getResourceManager(index).getRMContext().getRMAdminService() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java index 963d01b4c90ff..4b64e68908a05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java @@ -41,6 +41,7 @@ import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -402,7 +403,7 @@ public void testUncaughtExceptionHandlerWithHAEnabled() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new HadoopThread(new Runnable() { @Override public void run() { throw rte; @@ -446,7 +447,7 @@ public void testUncaughtExceptionHandlerWithoutHA() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new HadoopThread(new Runnable() { @Override public void run() { throw rte; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java index dfdd8aa53fa29..925712988a0d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java @@ -26,6 +26,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.Service.STATE; import 
org.apache.hadoop.service.ServiceStateChangeListener; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -97,8 +98,8 @@ public void stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(configuration); - new Thread() { - public void run() { + new HadoopThread() { + public void work() { resourceManager.start(); } }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java index 1a8b8f5040362..e8aac07520859 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java @@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -782,9 +783,9 @@ public void testOutOfOrder() throws Exception { recordFactory.newRecordInstance(ContainerLaunchContext.class); // start container from another thread - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { asyncClient.startContainerAsync(container, clc); } }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index f0da771332124..1cc1d3886a86a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -23,6 +23,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; @@ -291,10 +292,10 @@ public void testSubmitApplicationInterrupted(SchedulerType type) throws IOExcept client.start(); // Submit the application and then interrupt it while its waiting // for submission to be successful. 
- final class SubmitThread extends Thread { + final class SubmitThread extends HadoopThread { private boolean isInterrupted = false; @Override - public void run() { + public void work() { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); ApplicationId applicationId = ApplicationId.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index a3436f7bbe0ba..d71b06a3f94af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -218,7 +219,7 @@ protected void serviceInit(Configuration conf) throws Exception{ protected void serviceStart() throws Exception { //start all the components super.serviceStart(); - eventHandlingThread = new Thread(createThread()); + eventHandlingThread = new HadoopThread(createThread()); eventHandlingThread.setName(dispatcherThreadName); eventHandlingThread.start(); } @@ -284,7 +285,7 @@ protected void dispatch(Event event) { && (ShutdownHookManager.get().isShutdownInProgress()) == false && stopped == false) { stopped = true; - Thread shutDownThread = new Thread(createShutDownThread()); + Thread shutDownThread = new HadoopThread(createShutDownThread()); shutDownThread.setName("AsyncDispatcher ShutDown handler"); shutDownThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 647ab6e9481f5..e4a7024c8e47b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -28,6 +28,7 @@ import org.slf4j.MarkerFactory; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import java.util.concurrent.BlockingQueue; @@ -105,7 +106,7 @@ public void run() { public EventDispatcher(EventHandler handler, String name) { super(name); this.handler = handler; - this.eventProcessor = new Thread(new EventProcessor()); + this.eventProcessor = new HadoopThread(new EventProcessor()); this.eventProcessor.setName(getName() + ":Event Processor"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java index 0ae6c47d0ecd7..809aa8e34ff63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * A simple 
liveliness monitor with which clients can register, trust the @@ -66,7 +67,7 @@ public AbstractLivelinessMonitor(String name) { protected void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; resetTimer(); - checkerThread = new Thread(new PingChecker()); + checkerThread = new HadoopThread(new PingChecker()); checkerThread.setName("Ping Checker for "+getName()); checkerThread.start(); super.serviceStart(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java index e0201cfcd1557..aaf13ca12761e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import static org.junit.jupiter.api.Assertions.assertSame; @@ -44,7 +45,7 @@ void testUncaughtExceptionHandlerWithRuntimeException() final YarnUncaughtExceptionHandler spyYarnHandler = spy(exHandler); final YarnRuntimeException yarnException = new YarnRuntimeException( "test-yarn-runtime-exception"); - final Thread yarnThread = new Thread(new Runnable() { + final Thread yarnThread = new HadoopThread(new Runnable() { @Override public void run() { throw yarnException; @@ -74,7 +75,7 @@ void testUncaughtExceptionHandlerWithError() ExitUtil.disableSystemExit(); final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler); final java.lang.Error error = new java.lang.Error("test-error"); - final Thread errorThread = new Thread(new Runnable() { + final Thread 
errorThread = new HadoopThread(new Runnable() { @Override public void run() { throw error; @@ -103,7 +104,7 @@ void testUncaughtExceptionHandlerWithOutOfMemoryError() ExitUtil.disableSystemHalt(); final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler); final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error"); - final Thread oomThread = new Thread(new Runnable() { + final Thread oomThread = new HadoopThread(new Runnable() { @Override public void run() { throw oomError; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index 054e751ff6436..3028419bd61cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -55,6 +55,7 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.TestContainerId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -174,8 +175,8 @@ private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long l final CountDownLatch latch = new CountDownLatch(1); - Thread t = new Thread() { - public void run() { + HadoopThread t = new HadoopThread() { + public void work() { try { for (int i = 0; i < length / 3; i++) { osw.write(ch); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java index 09dfb92f1d0e6..d576c08df13b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java @@ -47,6 +47,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.MemInfo; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.ProcessSmapMemoryInfo; @@ -75,8 +76,8 @@ public class TestProcfsBasedProcessTree { private static final int N = 6; // Controls the RogueTask - private class RogueTaskThread extends Thread { - public void run() { + private class RogueTaskThread extends HadoopThread { + public void work() { try { Vector args = new Vector(); if (isSetsidAvailable()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index 78741720a171d..0e1ca1be583a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -20,6 +20,7 @@ import 
org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.commons.collections4.map.LRUMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -283,7 +284,7 @@ public StartAndInsertTime(long startTime, long insertTime) { } } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread extends HadoopThread { private final long ttl; private final long ttlInterval; @@ -298,7 +299,7 @@ public EntityDeletionThread(Configuration conf) { } @Override - public void run() { + public void work() { while (true) { long timestamp = System.currentTimeMillis() - ttl; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index 97ff86ede271b..cd7b9cb77e75b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import java.io.IOException; import java.util.ArrayList; @@ -389,7 +390,7 @@ protected void serviceStop() throws Exception { super.serviceStop(); } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread 
extends HadoopThread { private final long ttl; private final long ttlInterval; @@ -404,7 +405,7 @@ private class EntityDeletionThread extends Thread { } @Override - public void run() { + public void work() { Thread.currentThread().setName("Leveldb Timeline Store Retention"); while (true) { long timestamp = System.currentTimeMillis() - ttl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java index cb59d41505deb..f2110da05eccf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java @@ -34,13 +34,14 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; /** * Extends Thread and provides an implementation that is used for processing the * AM heart beat request asynchronously and sending back the response using the * callback method registered with the system. 
*/ -public class AMHeartbeatRequestHandler extends Thread { +public class AMHeartbeatRequestHandler extends HadoopThread { public static final Logger LOG = LoggerFactory.getLogger(AMHeartbeatRequestHandler.class); @@ -83,7 +84,7 @@ public void shutdown() { } @Override - public void run() { + public void work() { while (keepRunning) { AsyncAllocateRequestInfo requestInfo; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java index 0ff4260c5e358..f1a385d2b1452 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -37,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -105,7 +106,7 @@ protected void serviceStart() throws Exception { protected void serviceStop() throws Exception { if (!this.unmanagedAppMasterMap.isEmpty()) { - finishApplicationThread = new Thread(createForceFinishApplicationThread()); + finishApplicationThread = new HadoopThread(createForceFinishApplicationThread()); finishApplicationThread.setName(dispatcherThreadName); finishApplicationThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java index bb6944e1034e0..9e0a899d718f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java @@ -33,6 +33,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -228,7 +229,7 @@ public void testSlowRegisterCall() throws YarnException, IOException, InterruptedException { // Register with wait() in RM in a separate thread - Thread registerAMThread = new Thread(new Runnable() { + Thread registerAMThread = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -486,10 +487,10 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(attemptId).doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 76d3439575c55..ff941fcfe2112 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT; @@ -851,7 +852,7 @@ public String getProcessId(ContainerId containerID) { * This class will signal a target container after a specified delay. 
* @see #signalContainer */ - public static class DelayedProcessKiller extends Thread { + public static class DelayedProcessKiller extends HadoopThread { private final Container container; private final String user; private final String pid; @@ -883,7 +884,7 @@ public DelayedProcessKiller(Container container, String user, String pid, } @Override - public void run() { + public void work() { try { Thread.sleep(delay); containerExecutor.signalContainer(new ContainerSignalContext.Builder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 6110e624f8d37..4019efc3c7ca0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -534,9 +535,9 @@ public String getName() { } protected void shutDown(final int exitCode) { - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { NodeManager.this.stop(); } catch (Throwable t) { @@ -559,9 +560,9 @@ protected void resyncWithRM() { // Some other thread is already created for resyncing, do nothing } else { // We have got the lock, create a new thread - new Thread() { + new 
HadoopThread() { @Override - public void run() { + public void work() { try { if (!rmWorkPreservingRestartEnabled) { LOG.info("Cleaning up running containers on resync"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index 37fa33e14fcce..b83fe4944a36f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; @@ -149,7 +150,7 @@ protected void serviceStop() throws Exception { /** * Thread that monitors the resource utilization of this node. */ - private class MonitoringThread extends Thread { + private class MonitoringThread extends HadoopThread { /** * Initialize the node resource monitoring thread. */ @@ -162,7 +163,7 @@ public MonitoringThread() { * Periodically monitor the resource utilization of the node. 
*/ @Override - public void run() { + public void work() { while (true) { // Get node utilization and save it into the health status long pmem = resourceCalculatorPlugin.getPhysicalMemorySize() - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 5da709c49dc2b..a12742e4eae90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -44,6 +44,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -329,7 +330,7 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { try { statusUpdater.join(); registerWithRM(); - statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); + statusUpdater = new HadoopThread(statusUpdaterRunnable, "Node Status Updater"); this.isStopped = false; statusUpdater.start(); LOG.info("NodeStatusUpdater thread is reRegistered and restarted"); @@ -828,7 +829,7 @@ private static Map parseCredentials( protected void startStatusUpdater() { statusUpdaterRunnable = new StatusUpdaterRunnable(); statusUpdater = - new Thread(statusUpdaterRunnable, "Node Status Updater"); + new HadoopThread(statusUpdaterRunnable, "Node Status 
Updater"); statusUpdater.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index 78ba39ef69380..e5fcf92ae7e52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -52,6 +52,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; @@ -497,10 +498,10 @@ public void validateResult() throws IOException { private Thread startStreamReader(final InputStream stream) throws IOException { - Thread streamReaderThread = new Thread() { + Thread streamReaderThread = new HadoopThread() { @Override - public void run() { + public void work() { try (BufferedReader lines = new BufferedReader( new InputStreamReader(stream, StandardCharsets.UTF_8))) { char[] buf = new char[512]; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index cdd9fc916e339..6c9a0e69583cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -1749,9 +1750,9 @@ private void doRelaunch(final ContainerImpl container, container.sendRelaunchEvent(); } else { // wait for some time, then send launch event - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(retryInterval); container.sendRelaunchEvent(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index e942983e01168..f381b2a514a09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -55,7 +56,7 @@ * events of all the containers together, and if we go over the limit picks * a container to kill. The algorithm that picks the container is a plugin. */ -public class CGroupElasticMemoryController extends Thread { +public class CGroupElasticMemoryController extends HadoopThread { protected static final Logger LOG = LoggerFactory .getLogger(CGroupElasticMemoryController.class); private final Clock clock = new MonotonicClock(); @@ -238,7 +239,7 @@ public static boolean isAvailable() { * reasons. */ @Override - public void run() { + public void work() { ExecutorService executor = null; try { // Disable OOM killer and set a limit. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index a7f0722e66f8e..74cbc90124876 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -80,6 +80,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -861,7 +862,7 @@ private static ExecutorService createLocalizerExecutor(Configuration conf) { } - class PublicLocalizer extends Thread { + class PublicLocalizer extends HadoopThread { final FileContext lfs; final Configuration conf; @@ -975,7 +976,7 @@ private void createDir(Path dirPath, FsPermission perms) } @Override - public void run() { + public void work() { try { // TODO shutdown, better error handling esp. DU while (!Thread.currentThread().isInterrupted()) { @@ -1030,7 +1031,7 @@ public void run() { * access to user's credentials. One {@link LocalizerRunner} per localizerId. 
* */ - class LocalizerRunner extends Thread { + class LocalizerRunner extends HadoopThread { final LocalizerContext context; final String localizerId; @@ -1254,7 +1255,7 @@ private Path getPathForLocalization(LocalResource rsrc, @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { Path nmPrivateCTokensPath = null; Throwable exception = null; try { @@ -1405,7 +1406,7 @@ static String buildTokenFingerprint(Token tk) return fingerprint.toString(); } - static class CacheCleanup extends Thread { + static class CacheCleanup extends HadoopThread { private final Dispatcher dispatcher; @@ -1416,7 +1417,7 @@ public CacheCleanup(Dispatcher dispatcher) { @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { dispatcher.getEventHandler().handle( new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 0b4bd4a3fbd81..608801d5e3cc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import 
org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; @@ -489,13 +490,13 @@ boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree, curMemUsageOfAgedProcesses, limit); } - private class MonitoringThread extends Thread { + private class MonitoringThread extends HadoopThread { MonitoringThread() { super("Container Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { long start = Time.monotonicNow(); @@ -884,13 +885,13 @@ private String formatUsageString(long currentVmemUsage, long vmemLimit, } } - private class LogMonitorThread extends Thread { + private class LogMonitorThread extends HadoopThread { LogMonitorThread() { super("Container Log Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { for (Entry entry : trackingContainers.entrySet()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index 86e1379223bc5..9d5ecfb27d8b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java @@ -66,6 +66,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import 
org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -462,8 +463,8 @@ public void testContainerKill() throws Exception { assumeTrue(shouldRun()); final ContainerId sleepId = getNextContainerId(); - Thread t = new Thread() { - public void run() { + HadoopThread t = new HadoopThread() { + public void work() { try { runAndBlock(sleepId, "sleep", "100"); } catch (IOException|ConfigurationException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index ad7a1e7776cdf..e6b6680de4a67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -48,6 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -744,9 +745,9 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { } } - class ContainerUpdateResourceThread extends Thread { + class ContainerUpdateResourceThread extends 
HadoopThread { @Override - public void run() { + public void work() { // Construct container resource increase request List increaseTokens = new ArrayList(); // Add increase request. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 89010bb3342e9..dbcf61ec7e809 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -69,6 +69,7 @@ import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1173,7 +1174,7 @@ protected NodeStatusUpdater createNodeStatusUpdater(Context context, assertTrue(lastService instanceof NodeStatusUpdater, "last service is NOT the node status updater"); - Thread starterThread = new Thread(() -> { + Thread starterThread = new HadoopThread(() -> { try { nm.start(); } catch (Throwable e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java index 5172e12b64e7b..984edf296bc8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java @@ -288,11 +288,11 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(getAttemptId()) .doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java index a30a13f0a132e..5c32c4ceccbdc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java @@ -74,6 +74,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -321,9 +322,9 @@ public void testMultipleLocalizers() throws Exception { FakeContainerLocalizer localizerB = testB.init(); // run localization - Thread threadA = new Thread() { + HadoopThread threadA = new HadoopThread() { @Override - public void run() { + public void work() { try { localizerA.runLocalization(nmAddr); } catch (Exception e) { @@ -331,9 +332,9 @@ public void run() { } } }; - Thread threadB = new Thread() { + HadoopThread threadB = new HadoopThread() { @Override - public void run() { + public void work() { try { localizerB.runLocalization(nmAddr); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java index 3b7d3011f91e3..054494bfd8453 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.util; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; @@ -114,9 +115,9 @@ public void 
testDeleteCgroup() throws Exception { fos.close(); final CountDownLatch latch = new CountDownLatch(1); - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { latch.countDown(); try { Thread.sleep(200); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b35f8cb295cae..4fda747e9c824 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -57,6 +57,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1135,7 +1136,7 @@ private class SchedulerEventDispatcher extends SchedulerEventDispatcher(String name, int samplesPerMin) { super(scheduler, name); this.eventProcessorMonitor = - new Thread(new EventProcessorMonitor(getEventProcessorId(), + new HadoopThread(new EventProcessorMonitor(getEventProcessorId(), samplesPerMin)); this.eventProcessorMonitor .setName("ResourceManager Event Processor Monitor"); @@ -1220,7 +1221,7 @@ protected void serviceStop() throws Exception { */ private void handleTransitionToStandByInNewThread() { Thread standByTransitionThread = - new Thread(activeServices.standByTransitionRunnable); + new 
HadoopThread(activeServices.standByTransitionRunnable); standByTransitionThread.setName("StandByTransitionThread"); standByTransitionThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 9f4de2868a1fd..928dc74ff21ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -105,14 +106,14 @@ protected void serviceStop() throws Exception { launcherPool.shutdown(); } - private class LauncherThread extends Thread { + private class LauncherThread extends HadoopThread { public LauncherThread() { super("ApplicationMaster Launcher"); } @Override - public void run() { + public void work() { while (!this.isInterrupted()) { Runnable toLaunch; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java index 6384736d62e11..7f6424d3e50b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -597,7 +598,7 @@ public int incrementCurrentKeyId() { */ public void createCleanUpFinishApplicationThread(String stage) { String threadName = cleanUpThreadNamePrefix + "-" + stage; - Thread finishApplicationThread = new Thread(createCleanUpFinishApplicationThread()); + Thread finishApplicationThread = new HadoopThread(createCleanUpFinishApplicationThread()); finishApplicationThread.setName(threadName); finishApplicationThread.start(); LOG.info("CleanUpFinishApplicationThread has been started {}.", threadName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java index f1b80a946a7d0..09e8e4c872126 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java @@ -29,6 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -536,13 +537,13 @@ public void handle(TimelineV1PublishEvent event) { } } - private class PutEventThread extends Thread { + private class PutEventThread extends HadoopThread { PutEventThread() { super("PutEventThread"); } @Override - public void run() { + public void work() { LOG.info("System metrics publisher will put events every " + String.valueOf(putEventInterval) + " milliseconds"); while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index b7d1220bf9f2d..e132b54c1d01d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -27,6 +27,7 @@ import 
org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.classification.VisibleForTesting; @@ -70,7 +71,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName(getName()); return t; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index f0990cf8fb0a6..241d16225982e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -30,6 +30,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.curator.ZKCuratorManager.SafeTransaction; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -1468,13 +1469,13 @@ public void safeDeleteAndCheckNode(String path, List fencingACL, * Helper class that periodically attempts creating a znode to ensure that * this RM continues to be the Active. 
*/ - private class VerifyActiveStatusThread extends Thread { + private class VerifyActiveStatusThread extends HadoopThread { VerifyActiveStatusThread() { super(VerifyActiveStatusThread.class.getName()); } @Override - public void run() { + public void work() { try { while (!isFencedState()) { // Create and delete fencing node diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 6010bd21a186e..0f6e7999a9e55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1238,9 +1239,9 @@ public RMAppAttemptState transition(RMAppAttemptImpl appAttempt, private void retryFetchingAMContainer(final RMAppAttemptImpl appAttempt) { // start a new thread so that we are not blocking main dispatcher thread. 
- new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); } catch (InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 3343c5f93118d..7bfe8f93d77f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -117,6 +117,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.util.concurrent.HadoopThread; @SuppressWarnings("unchecked") @@ -1716,9 +1717,9 @@ public void update() { * Thread which calls {@link #update()} every * updateInterval milliseconds. 
*/ - private class UpdateThread extends Thread { + private class UpdateThread extends HadoopThread { @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { synchronized (updateThreadMonitor) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 001c638801bf5..b9693e153bb65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -21,6 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -295,7 +296,7 @@ private void dynamicallyUpdateAppActivitiesMaxQueueLengthIfNeeded() { @Override protected void serviceStart() throws Exception { - cleanUpThread = new Thread(new Runnable() { + cleanUpThread = new HadoopThread(new Runnable() { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 02ffe83a6df7c..1f26e24abcb02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -53,6 +53,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -638,7 +639,7 @@ public void setAsyncSchedulingConf(AsyncSchedulingConfiguration conf) { this.asyncSchedulingConf = conf; } - static class AsyncScheduleThread extends Thread { + static class AsyncScheduleThread extends HadoopThread { private final CapacityScheduler cs; private AtomicBoolean runSchedules = new AtomicBoolean(false); @@ -650,7 +651,7 @@ public AsyncScheduleThread(CapacityScheduler cs) { } @Override - public void run() { + public void work() { int debuggingLogCounter = 0; while (!Thread.currentThread().isInterrupted()) { try { @@ -691,7 +692,7 @@ public void suspendSchedule() { } - static class ResourceCommitterService extends Thread { + static class ResourceCommitterService extends HadoopThread { private final CapacityScheduler cs; private BlockingQueue> backlogs 
= new LinkedBlockingQueue<>(); @@ -702,7 +703,7 @@ public ResourceCommitterService(CapacityScheduler cs) { } @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { ResourceCommitRequest request = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 7fab417d893cb..85e707b2f3ea2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.XMLUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.security.Permission; @@ -118,7 +119,7 @@ public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); if (this.allocFile != null) { this.fs = allocFile.getFileSystem(conf); - reloadThread = new Thread(() -> { + reloadThread = new HadoopThread(() -> { while (running) { try { synchronized (this) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java index 221bb17ae5ba3..8047a96846690 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java @@ -19,6 +19,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; @@ -39,7 +40,7 @@ /** * Thread that handles FairScheduler preemption. */ -class FSPreemptionThread extends Thread { +class FSPreemptionThread extends HadoopThread { private static final Logger LOG = LoggerFactory. 
getLogger(FSPreemptionThread.class); protected final FSContext context; @@ -71,7 +72,7 @@ class FSPreemptionThread extends Thread { } @Override - public void run() { + public void work() { while (!Thread.interrupted()) { try { FSAppAttempt starvedApp = context.getStarvedApps().take(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index a3e3ddfafe39f..6a15393d9cbfe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -100,6 +100,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.slf4j.Logger; @@ -315,10 +316,10 @@ public QueueManager getQueueManager() { * asynchronous to the node heartbeats. 
*/ @Deprecated - private class ContinuousSchedulingThread extends Thread { + private class ContinuousSchedulingThread extends HadoopThread { @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { continuousSchedulingAttempt(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java index 38af12719efa0..d8251a384690f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -99,7 +100,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new HadoopThread(r); t.setName(getName()); return t; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 8e1214afc8095..271ae991746de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -62,6 +62,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AbstractEvent; @@ -200,7 +201,7 @@ protected void serviceStart() throws Exception { dtCancelThread.start(); if (tokenKeepAliveEnabled) { delayedRemovalThread = - new Thread(new DelayedTokenRemovalRunnable(getConfig()), + new HadoopThread(new DelayedTokenRemovalRunnable(getConfig()), "DelayedTokenCanceller"); delayedRemovalThread.start(); } @@ -347,7 +348,7 @@ public int hashCode() { } - private static class DelegationTokenCancelThread extends Thread { + private static class DelegationTokenCancelThread extends HadoopThread { private static class TokenWithConf { Token token; Configuration conf; @@ -377,7 +378,7 @@ public void cancelToken(Token token, } } - public void run() { + public void work() { TokenWithConf tokenWithConf = null; while (true) { try { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java index 4391c4177bb50..b39e2a6095171 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java @@ -31,6 +31,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -89,8 +90,8 @@ this.queueACLsManager, getRMContext() protected void doSecureLogin() throws IOException { } }; - new Thread() { - public void run() { + new HadoopThread() { + public void work() { resourceManager.start(); }; }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index 7d7330ed9cf7f..642f6d6a35dba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -55,6 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -150,8 +151,8 @@ protected ClientRMService createClientRMService() { this.queueACLsManager, null); }; }; - new Thread() { - public void run() { + new HadoopThread() { + public void work() { UserGroupInformation.createUserForTesting(ENEMY, new String[] {}); UserGroupInformation.createUserForTesting(FRIEND, new String[] { FRIENDLY_GROUP }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 677b8ab5349a8..df56378637fe8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -71,6 +71,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Sets; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.MockApps; import 
org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; @@ -1532,9 +1533,9 @@ public void handle(Event rawEvent) { rmService.init(new Configuration()); // submit an app and wait for it to block while in app submission - Thread t = new Thread() { + HadoopThread t = new HadoopThread() { @Override - public void run() { + public void work() { try { rmService.submitApplication(submitRequest1); } catch (YarnException | IOException e) {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java index a7e7253171fe1..ccce2410cd438 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -189,9 +190,9 @@ public void testExpireCurrentZKSession() throws Exception{ public void testRMFailToTransitionToActive() throws Exception{ conf.set(YarnConfiguration.RM_HA_ID, "rm1"); final AtomicBoolean throwException = new AtomicBoolean(true); - Thread launchRM = new Thread() { + HadoopThread launchRM = new HadoopThread() { 
@Override - public void run() { + public void work() { rm1 = new MockRM(conf, true) { @Override synchronized void transitionToActive() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index d15a02c778a86..7dc011cd1fcb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -24,6 +24,7 @@ import java.util.UUID; import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import static org.assertj.core.api.Assertions.assertThat; @@ -515,7 +516,7 @@ void stopActiveServices() { rm.adminService.transitionToActive(requestInfo); // 3. 
Try Transition to standby - Thread t = new Thread(new Runnable() { + Thread t = new HadoopThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java index 4895987be4c5a..ef4bc6de7d813 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java @@ -236,14 +236,16 @@ private void checkAsyncSchedulerThreads(Thread currentThread){ Thread resourceCommitterService = null; for (Thread thread : threads) { StackTraceElement[] stackTrace = thread.getStackTrace(); - if(stackTrace.length>0){ - String stackBottom = stackTrace[stackTrace.length-1].toString(); - if(stackBottom.contains("AsyncScheduleThread.run")){ - numAsyncScheduleThread++; - asyncScheduleThread = thread; - }else if(stackBottom.contains("ResourceCommitterService.run")){ - numResourceCommitterService++; - resourceCommitterService = thread; + if (stackTrace.length > 0) { + for (StackTraceElement elem : stackTrace) { + String line = elem.toString(); + if (line.contains("AsyncScheduleThread.work")) { + numAsyncScheduleThread++; + asyncScheduleThread = thread; + } else if (line.contains("ResourceCommitterService.work")) { + numResourceCommitterService++; + resourceCommitterService = thread; + } } } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index 15319a7e51585..92da1f682080c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -414,7 +415,7 @@ public void testFSRMStateStoreClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); cluster.shutdownNameNodes(); - Thread clientThread = new Thread(() -> { + Thread clientThread = new HadoopThread(() -> { try { store.storeApplicationStateInternal( ApplicationId.newInstance(100L, 1), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java index d41c8235b5c8c..42dfa45ce06c5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreTestBase.TestDispatcher; import org.apache.hadoop.util.ZKUtil; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -112,9 +112,9 @@ public void testZKClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); testingServer.stop(); - Thread clientThread = new Thread() { + HadoopThread clientThread = new HadoopThread() { @Override - public void run() { + public void work() { try { store.getData(path); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index 7a0b49b878b11..bbe42d9d55f4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -26,6 
+26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -101,8 +102,8 @@ public void setUp() { resourceTrackerService.start(); } - private class ThirdNodeHeartBeatThread extends Thread { - public void run() { + private class ThirdNodeHeartBeatThread extends HadoopThread { + public void work() { int lastResponseID = 0; while (!stopT) { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index 64ac256275527..39dc7d823db15 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; import org.apache.hadoop.test.MetricsAsserts; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -817,7 +818,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { * simulate the concurrent calls for QueueMetrics#getQueueMetrics */ // thread A will keep querying the same 
queue metrics for a specified number of iterations - Thread threadA = new Thread(() -> { + Thread threadA = new HadoopThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics qm = QueueMetrics.getQueueMetrics().get(queueName); @@ -833,7 +834,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { } }); // thread B will keep adding new queue metrics for a specified number of iterations - Thread threadB = new Thread(() -> { + Thread threadB = new HadoopThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics.getQueueMetrics().put("q" + i, metrics); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index c77bb26de82cc..c074c0f8873f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -111,6 +111,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -1064,7 +1065,7 @@ public ApplicationMasterProtocol run() { // grab the scheduler lock from another thread // and verify an allocate call in 
this thread doesn't block on it final CyclicBarrier barrier = new CyclicBarrier(2); - Thread otherThread = new Thread(new Runnable() { + Thread otherThread = new HadoopThread(new Runnable() { @Override public void run() { synchronized(cs) { @@ -3088,7 +3089,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The scheduler thread holds the queue's read-lock for 5 seconds // then the preemption's read-lock is used - Thread schedulerThread = new Thread(() -> { + Thread schedulerThread = new HadoopThread(() -> { queue.readLock.lock(); try { Thread.sleep(5 * 1000); @@ -3101,7 +3102,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { }, "SCHEDULE"); // The complete thread locks/unlocks the queue's write-lock after 1 seconds - Thread completeThread = new Thread(() -> { + Thread completeThread = new HadoopThread(() -> { try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -3115,7 +3116,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The refresh thread holds the preemption's write-lock after 2 seconds // while it calls the getChildQueues(ByTryLock) that // locks(tryLocks) the queue's read-lock - Thread refreshThread = new Thread(() -> { + Thread refreshThread = new HadoopThread(() -> { try { Thread.sleep(2 * 1000); } catch (InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java index 4e63d3858ab48..920795fd15111 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -801,7 +802,7 @@ public RMNodeLabelsManager createNodeLabelManager() { rm.close(); } - public static class NMHeartbeatThread extends Thread { + public static class NMHeartbeatThread extends HadoopThread { private List mockNMS; private int interval; private volatile boolean shouldStop = false; @@ -811,7 +812,7 @@ public NMHeartbeatThread(List mockNMs, int interval) { this.interval = interval; } - public void run() { + public void work() { while (true) { if (shouldStop) { break; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index 1a682e7e06fd3..743fac60e444b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -39,7 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; - +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -422,8 +422,8 @@ public void testAllocateOfReservedContainerFromAnotherNode() .build()); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new Thread() { - public void run() { + Thread t = new HadoopThread() { + public void work() { try { MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1); result.set(true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java index 3edb87ebb903e..dc098981a6583 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.test.GenericTestUtils; 
+import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -171,8 +172,8 @@ public void testAllocateReservationFromOtherNode() throws Exception { // Launch AM in a thread and in parallel free the preempted node's // unallocated resources in main thread - Thread t1 = new Thread() { - public void run() { + Thread t1 = new HadoopThread() { + public void work() { try { MockAM am2 = MockRM.launchAM(app2, rm, nm1); result.set(true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d977a7adab452..f14b2cbe10591 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -62,6 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -1129,7 +1130,7 @@ public void testUserLimitCache() throws Exception { // Set up allocation threads Thread[] threads = new Thread[numAllocationThreads]; for (int i = 0; 
i < numAllocationThreads; i++) { - threads[i] = new Thread(new Runnable() { + threads[i] = new HadoopThread(new Runnable() { @Override public void run() { try { @@ -4386,7 +4387,7 @@ public void testConcurrentAccess() throws Exception { final List conException = new ArrayList(); - Thread submitAndRemove = new Thread(new Runnable() { + Thread submitAndRemove = new HadoopThread(new Runnable() { @Override public void run() { @@ -4405,7 +4406,7 @@ public void run() { } }, "SubmitAndRemoveApplicationAttempt Thread"); - Thread getAppsInQueue = new Thread(new Runnable() { + Thread getAppsInQueue = new HadoopThread(new Runnable() { List apps = new ArrayList(); @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java index 706cdc9034cea..5958f79971392 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java @@ -37,7 +37,7 @@ private MockPreemptionThread(FairScheduler scheduler) { } @Override - public void run() { + public void work() { while (!Thread.interrupted()) { try { FSAppAttempt app = context.getStarvedApps().take(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index f5254298dc42d..cf9f02f331fd8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -338,9 +339,9 @@ public void TestNodeAvailableResourceComparatorTransitivity() { } // To simulate unallocated resource changes - new Thread() { + new HadoopThread() { @Override - public void run() { + public void work() { for (int j = 0; j < 100; j++) { for (FSSchedulerNode node : clusterNodeTracker.getAllNodes()) { int i = ThreadLocalRandom.current().nextInt(-30, 30); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index 33fdba582ea1e..fc6d9d646332c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -31,6 +31,7 @@ import java.util.TreeSet; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -484,9 +485,9 @@ public void testModWhileSorting(){ * Thread to simulate concurrent schedulable changes while sorting */ private Thread modificationThread(final List schedulableList) { - Thread modThread = new Thread() { + HadoopThread modThread = new HadoopThread() { @Override - public void run() { + public void work() { try { // This sleep is needed to make sure the sort has started before the // modifications start and finish diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 78c372c1b22b7..33e167d4aa759 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -74,6 +74,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -994,9 +995,9 @@ public Long answer(InvocationOnMock invocation) localDtr.init(conf); localDtr.start(); // submit a job that blocks during renewal - Thread submitThread = new Thread() { + HadoopThread submitThread = new HadoopThread() { @Override - public void run() { + public void work() { localDtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user", new Configuration()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java index 6e574d79086bd..324f65c582461 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; +import 
org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -1735,7 +1736,7 @@ public void testSchedulerBulkActivities() throws Exception { } } - private class RESTClient extends Thread { + private class RESTClient extends HadoopThread { private int expectedCount; private boolean done = false; @@ -1754,7 +1755,7 @@ JSONObject getOutput() { } @Override - public void run() { + public void work() { WebTarget r = targetWithJsonObject(); Response response = r.path("ws").path("v1").path("cluster") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java index 8f3c4d0fe577e..0fb77c2369a7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java @@ -47,6 +47,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -202,7 +203,7 @@ protected void serviceStop() throws Exception { } protected void shutDown() { - new Thread(Router.this::stop).start(); + new HadoopThread(Router.this::stop).start(); } protected RouterClientRMService createClientRMProxyService() { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java index 0c02fa1e8caae..13613a8fd2640 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java @@ -29,6 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; @@ -226,9 +227,9 @@ public void testClientPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * ClientRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends HadoopThread { private ClientRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java index d4b02f4d951f3..15cdcad9f4f26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java @@ -29,6 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; @@ -235,9 +236,9 @@ public void testRMAdminPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RMAdminRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends HadoopThread { private RMAdminRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java index ceb75e38a24a7..60f7bf8ac4a80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java @@ -31,6 +31,7 @@ import javax.ws.rs.core.Response; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; @@ -297,9 +298,9 @@ public void testWebPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RESTRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends HadoopThread { private RESTRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java index d8a7a56ac198b..1860bc1953ff7 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java @@ -24,6 +24,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Sets; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.NoDocumentFoundException; @@ -244,7 +245,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { if (executorService != null) { 
executorService.shutdown(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java index 7cfb7f2fe415c..3e15671872c1a 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java @@ -35,6 +35,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.HadoopThread; import org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.CollectionType; @@ -279,7 +280,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { if (executorService != null) { executorService.shutdown(); } From 1457fa7ff8369b1a753402d985794d3e39af62ed Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 28 Aug 2025 05:22:17 +0200 Subject: [PATCH 2/5] DO NOT COMMIT use ayushtkn's Yetus branch --- dev-support/Jenkinsfile | 4 ++-- 1 file changed, 
2 insertions(+), 2 deletions(-) diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 06095cc8a4f24..74e5bd18fce79 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -55,7 +55,7 @@ pipeline { environment { YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='rel/0.14.0' + YETUS_VERSION='a7d29a6a72750a0c5c39512f33945e773e69303e' } parameters { @@ -71,7 +71,7 @@ pipeline { checkout([ $class: 'GitSCM', branches: [[name: "${env.YETUS_VERSION}"]], - userRemoteConfigs: [[ url: 'https://github.com/apache/yetus.git']]] + userRemoteConfigs: [[ url: 'https://github.com/ayushtkn/yetus.git']]] ) } } From f6343ab5a51aaf9c6dee5f0d8a17f8b499b960e2 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 28 Aug 2025 15:37:59 +0200 Subject: [PATCH 3/5] Add unit test for HadoopThread and Daemon --- .../concurrent/TestSubjectPropagation.java | 125 ++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java new file mode 100644 index 0000000000000..9b8aac9d2a8ad --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.concurrent; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.concurrent.Callable; + +import javax.security.auth.Subject; + +import org.apache.hadoop.security.authentication.util.SubjectUtil; +import org.apache.hadoop.util.Daemon; +import org.junit.jupiter.api.Test; + +public class TestSubjectPropagation { + + private Subject childSubject = null; + + @Test + public void testWork() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + HadoopThread t = new HadoopThread() { + public void work() { + childSubject = SubjectUtil.current(); + } + }; + t.start(); + t.join(1000); + return (Void) null; + } + }); + + assertEquals(parentSubject, childSubject); + } + + @Test + public void testRunnable() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + Runnable r = new Runnable() { + @Override + public void run() { + childSubject = SubjectUtil.current(); + } + }; + + HadoopThread t = new HadoopThread(r); + t.start(); + t.join(1000); + return (Void) null; + } + }); + + assertEquals(parentSubject, childSubject); + } + + @Test + public void testDeamonWork() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + Daemon t = new Daemon() { + public void work() { + childSubject = SubjectUtil.current(); + } + }; + t.start(); + 
t.join(1000); + return (Void) null; + } + }); + + assertEquals(parentSubject, childSubject); + } + + @Test + public void testDaemonRunnable() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + Runnable r = new Runnable() { + @Override + public void run() { + childSubject = SubjectUtil.current(); + } + }; + + Daemon t = new Daemon(r); + t.start(); + t.join(1000); + return (Void) null; + } + }); + + assertEquals(parentSubject, childSubject); + } + +} From 24aefca55cd6ba1d0ad3f16818c88a990268e786 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 28 Aug 2025 19:52:43 +0200 Subject: [PATCH 4/5] rename HadoopThread to SubjectInheritingThread and add more Javadoc --- .../hadoop/conf/ReconfigurableBase.java | 4 +- .../apache/hadoop/fs/CachingGetSpaceUsed.java | 4 +- .../hadoop/fs/DelegationTokenRenewer.java | 4 +- .../java/org/apache/hadoop/fs/FileSystem.java | 4 +- .../org/apache/hadoop/ha/StreamPumper.java | 4 +- .../java/org/apache/hadoop/ipc/Client.java | 6 +- .../java/org/apache/hadoop/ipc/Server.java | 10 +- .../metrics2/impl/MetricsSinkAdapter.java | 6 +- .../hadoop/net/unix/DomainSocketWatcher.java | 4 +- .../hadoop/security/UserGroupInformation.java | 4 +- .../AbstractDelegationTokenSecretManager.java | 4 +- .../service/launcher/InterruptEscalator.java | 4 +- .../apache/hadoop/util/AsyncDiskService.java | 4 +- .../BlockingThreadPoolExecutorService.java | 4 +- .../java/org/apache/hadoop/util/Daemon.java | 79 +++++----- .../org/apache/hadoop/util/GcTimeMonitor.java | 4 +- .../java/org/apache/hadoop/util/Shell.java | 4 +- .../hadoop/util/ShutdownHookManager.java | 4 +- .../hadoop/util/concurrent/HadoopThread.java | 105 ------------- .../concurrent/SubjectInheritingThread.java | 148 ++++++++++++++++++ .../apache/hadoop/conf/TestConfiguration.java | 8 +- .../hadoop/conf/TestReconfiguration.java | 4 +- .../hadoop/fs/FCStatisticsBaseTest.java | 4 
+- .../hadoop/fs/TestFileSystemCaching.java | 4 +- .../java/org/apache/hadoop/fs/TestTrash.java | 8 +- .../fs/loadGenerator/LoadGenerator.java | 4 +- .../org/apache/hadoop/io/TestMD5Hash.java | 6 +- .../java/org/apache/hadoop/io/TestText.java | 4 +- .../hadoop/io/nativeio/TestNativeIO.java | 4 +- .../hadoop/io/nativeio/TestNativeIoInit.java | 10 +- .../hadoop/io/retry/TestFailoverProxy.java | 6 +- .../org/apache/hadoop/ipc/TestAsyncIPC.java | 8 +- .../hadoop/ipc/TestCallQueueManager.java | 10 +- .../apache/hadoop/ipc/TestFairCallQueue.java | 6 +- .../java/org/apache/hadoop/ipc/TestIPC.java | 14 +- .../hadoop/ipc/TestIPCServerResponder.java | 4 +- .../ipc/TestProtoBufRpcServerHandoff.java | 4 +- .../java/org/apache/hadoop/ipc/TestRPC.java | 4 +- .../hadoop/ipc/TestRPCWaitForProxy.java | 4 +- .../hadoop/ipc/TestRpcServerHandoff.java | 6 +- .../apache/hadoop/ipc/TestSocketFactory.java | 4 +- .../hadoop/metrics2/impl/TestSinkQueue.java | 6 +- .../metrics2/lib/TestMutableMetrics.java | 6 +- .../metrics2/source/TestJvmMetrics.java | 4 +- .../hadoop/net/unix/TestDomainSocket.java | 12 +- .../net/unix/TestDomainSocketWatcher.java | 10 +- .../security/TestAuthorizationContext.java | 4 +- .../hadoop/security/TestGroupsCaching.java | 10 +- .../security/TestLdapGroupsMapping.java | 6 +- .../security/TestUserGroupInformation.java | 4 +- .../hadoop/service/TestServiceLifecycle.java | 4 +- .../launcher/testservices/RunningService.java | 4 +- .../hadoop/test/MultithreadedTestUtil.java | 4 +- .../test/TestTimedOutTestsListener.java | 4 +- .../hadoop/util/TestAutoCloseableLock.java | 6 +- .../hadoop/util/TestInstrumentedLock.java | 8 +- .../util/TestInstrumentedReadWriteLock.java | 10 +- .../apache/hadoop/util/TestPureJavaCrc32.java | 6 +- .../hadoop/util/TestReflectionUtils.java | 6 +- .../org/apache/hadoop/util/TestShell.java | 6 +- .../util/TestShutdownThreadsHelper.java | 4 +- .../concurrent/TestSubjectPropagation.java | 4 +- .../crypto/key/kms/server/KMSBenchmark.java | 4 +- 
.../registry/server/dns/RegistryDNS.java | 4 +- .../server/services/RegistryAdminService.java | 4 +- .../apache/hadoop/hdfs/DeadNodeDetector.java | 6 +- .../hdfs/util/CombinedHostsFileReader.java | 4 +- .../ha/TestRequestHedgingProxyProvider.java | 4 +- .../hdfs/util/TestByteArrayManager.java | 8 +- .../hdfs/nfs/nfs3/AsyncDataService.java | 4 +- .../apache/hadoop/hdfs/nfs/TestUdpServer.java | 6 +- .../resolver/order/RouterResolver.java | 4 +- .../federation/router/ConnectionManager.java | 4 +- .../router/MountTableRefresherThread.java | 4 +- .../hdfs/server/federation/router/Router.java | 4 +- .../router/RouterHeartbeatService.java | 4 +- .../federation/router/RouterRpcServer.java | 4 +- ...RouterRefreshFairnessPolicyController.java | 8 +- .../router/TestRouterFederationRename.java | 4 +- .../federation/router/TestRouterRpc.java | 4 +- .../router/async/utils/SyncClass.java | 4 +- .../server/blockmanagement/BlockManager.java | 4 +- .../CacheReplicationMonitor.java | 4 +- .../blockmanagement/SlowDiskTracker.java | 4 +- .../hadoop/hdfs/server/common/Storage.java | 4 +- .../hdfs/server/datanode/BPServiceActor.java | 8 +- .../hadoop/hdfs/server/datanode/DataNode.java | 4 +- .../hdfs/server/datanode/VolumeScanner.java | 4 +- .../impl/FsDatasetAsyncDiskService.java | 4 +- .../datanode/fsdataset/impl/FsVolumeList.java | 6 +- .../impl/RamDiskAsyncLazyPersistService.java | 4 +- .../hdfs/server/namenode/FSEditLogAsync.java | 4 +- .../hadoop/hdfs/server/namenode/FSImage.java | 4 +- .../namenode/FSImageFormatProtobuf.java | 4 +- .../hadoop/hdfs/server/namenode/NameNode.java | 4 +- .../server/namenode/ha/EditLogTailer.java | 4 +- .../namenode/ha/StandbyCheckpointer.java | 4 +- .../hdfs/TestAppendSnapshotTruncate.java | 4 +- ...TestClientProtocolForPipelineRecovery.java | 6 +- .../hadoop/hdfs/TestDFSClientRetries.java | 16 +- .../hadoop/hdfs/TestDFSOutputStream.java | 6 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 4 +- .../apache/hadoop/hdfs/TestDatanodeDeath.java | 6 +- 
.../hadoop/hdfs/TestDeadNodeDetection.java | 4 +- .../apache/hadoop/hdfs/TestDecommission.java | 4 +- .../hdfs/TestDecommissionWithStriped.java | 6 +- .../apache/hadoop/hdfs/TestFileAppend2.java | 4 +- .../apache/hadoop/hdfs/TestFileAppend3.java | 6 +- .../apache/hadoop/hdfs/TestFileAppend4.java | 6 +- .../hadoop/hdfs/TestFileConcurrentReader.java | 10 +- .../hadoop/hdfs/TestFileCreationClient.java | 4 +- .../hadoop/hdfs/TestMultiThreadedHflush.java | 6 +- .../hadoop/hdfs/TestParallelReadUtil.java | 4 +- .../java/org/apache/hadoop/hdfs/TestRead.java | 4 +- ...TestReplaceDatanodeFailureReplication.java | 4 +- .../hdfs/TestReplaceDatanodeOnFailure.java | 4 +- .../client/impl/TestBlockReaderFactory.java | 8 +- .../server/balancer/TestBalancerService.java | 4 +- .../blockmanagement/TestBlockManager.java | 4 +- .../server/datanode/BlockReportTestBase.java | 4 +- .../server/datanode/TestBPOfferService.java | 4 +- .../server/datanode/TestBlockRecovery.java | 6 +- .../server/datanode/TestBlockRecovery2.java | 6 +- .../datanode/TestDataNodeHotSwapVolumes.java | 8 +- .../datanode/TestDataNodeVolumeFailure.java | 4 +- .../datanode/TestDataSetLockManager.java | 4 +- .../TestDataXceiverBackwardsCompat.java | 4 +- .../datanode/TestSimulatedFSDataset.java | 4 +- .../fsdataset/impl/TestFsDatasetImpl.java | 10 +- .../fsdataset/impl/TestFsVolumeList.java | 4 +- .../fsdataset/impl/TestLazyPersistFiles.java | 4 +- .../impl/TestReplicaCachingGetSpaceUsed.java | 4 +- .../server/diskbalancer/TestDiskBalancer.java | 4 +- .../namenode/NNThroughputBenchmark.java | 4 +- .../hdfs/server/namenode/TestAuditLogger.java | 6 +- .../hdfs/server/namenode/TestCheckpoint.java | 4 +- .../hdfs/server/namenode/TestDeleteRace.java | 12 +- .../hdfs/server/namenode/TestEditLog.java | 4 +- .../hdfs/server/namenode/TestEditLogRace.java | 8 +- .../server/namenode/TestFSNamesystemLock.java | 8 +- .../namenode/TestFSNamesystemMBean.java | 4 +- .../server/namenode/TestFileTruncate.java | 4 +- 
.../namenode/TestLargeDirectoryDelete.java | 4 +- .../server/namenode/TestListOpenFiles.java | 4 +- .../hdfs/server/namenode/TestMetaSave.java | 4 +- .../namenode/TestReencryptionHandler.java | 4 +- .../namenode/TestSecurityTokenEditLog.java | 4 +- .../namenode/ha/TestBootstrapStandby.java | 6 +- .../ha/TestConsistentReadsObserver.java | 12 +- .../ha/TestDelegationTokensWithHA.java | 4 +- .../server/namenode/ha/TestHASafeMode.java | 4 +- .../namenode/ha/TestRetryCacheWithHA.java | 4 +- .../namenode/ha/TestStandbyCheckpoints.java | 4 +- .../snapshot/TestOpenFilesWithSnapshot.java | 4 +- .../TestShortCircuitLocalRead.java | 6 +- .../hdfs/util/TestReferenceCountMap.java | 6 +- .../hadoop/hdfs/web/TestWebHDFSForHA.java | 4 +- .../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 4 +- .../hadoop/mapred/LocalContainerLauncher.java | 4 +- .../jobhistory/JobHistoryEventHandler.java | 4 +- .../hadoop/mapreduce/v2/app/MRAppMaster.java | 4 +- .../v2/app/TaskHeartbeatHandler.java | 4 +- .../v2/app/commit/CommitterEventHandler.java | 6 +- .../app/launcher/ContainerLauncherImpl.java | 4 +- .../mapreduce/v2/app/rm/RMCommunicator.java | 4 +- .../v2/app/rm/RMContainerAllocator.java | 6 +- .../v2/app/speculate/DefaultSpeculator.java | 4 +- .../mapreduce/v2/app/MRAppBenchmark.java | 4 +- .../local/TestLocalContainerAllocator.java | 4 +- .../apache/hadoop/mapred/LocalJobRunner.java | 4 +- .../apache/hadoop/mapred/CleanupQueue.java | 4 +- .../org/apache/hadoop/mapred/MapTask.java | 4 +- .../java/org/apache/hadoop/mapred/Task.java | 6 +- .../hadoop/mapred/pipes/Application.java | 4 +- .../hadoop/mapred/pipes/BinaryProtocol.java | 4 +- .../hadoop/mapreduce/lib/chain/Chain.java | 6 +- .../lib/map/MultithreadedMapper.java | 4 +- .../mapreduce/task/reduce/EventFetcher.java | 4 +- .../hadoop/mapreduce/task/reduce/Fetcher.java | 4 +- .../mapreduce/task/reduce/MergeThread.java | 4 +- .../task/reduce/ShuffleSchedulerImpl.java | 4 +- .../hadoop/mapreduce/util/ProcessTree.java | 4 +- 
.../apache/hadoop/mapred/TestIndexCache.java | 10 +- .../mapred/TestLocatedFileStatusFetcher.java | 4 +- .../mapred/TestTaskProgressReporter.java | 8 +- .../v2/hs/TestHistoryFileManager.java | 6 +- .../mapreduce/v2/hs/TestJobHistoryEvents.java | 4 +- ...tUnnecessaryBlockingOnHistoryFileInfo.java | 6 +- .../java/org/apache/hadoop/FailingMapper.java | 4 +- .../org/apache/hadoop/fs/JHLogAnalyzer.java | 4 +- .../fs/loadGenerator/LoadGeneratorMR.java | 4 +- .../apache/hadoop/mapred/ReliabilityTest.java | 8 +- .../org/apache/hadoop/mapred/TestCollect.java | 4 +- .../mapred/jobcontrol/TestJobControl.java | 4 +- .../jobcontrol/TestLocalJobControl.java | 4 +- .../hadoop/mapreduce/TestLocalRunner.java | 4 +- .../jobcontrol/TestMapReduceJobControl.java | 4 +- .../TestMapReduceJobControlWithMocks.java | 4 +- .../mapreduce/v2/MiniMRYarnCluster.java | 4 +- .../nativetask/StatusReportChecker.java | 4 +- .../examples/terasort/TeraInputFormat.java | 4 +- .../fs/s3a/ITestS3AIOStatisticsContext.java | 4 +- .../fs/s3a/scale/ITestS3AConcurrentOps.java | 4 +- .../AzureFileSystemThreadPoolExecutor.java | 4 +- .../fs/azure/BlockBlobAppendStream.java | 4 +- .../hadoop/fs/azure/SelfRenewingLease.java | 4 +- .../azure/metrics/BandwidthGaugeUpdater.java | 4 +- .../fs/azurebfs/services/ListActionTaker.java | 4 +- .../services/ReadBufferManagerV1.java | 4 +- .../services/ReadBufferManagerV2.java | 4 +- .../ITestAzureConcurrentOutOfBandIo.java | 4 +- ...rationsExceptionHandlingMultiThreaded.java | 26 +-- .../azure/ITestNativeAzureFileSystemLive.java | 6 +- .../azure/NativeAzureFileSystemBaseTest.java | 6 +- .../TestNativeAzureFileSystemConcurrency.java | 4 +- .../metrics/TestBandwidthGaugeUpdater.java | 4 +- .../ITestAzureBlobFileSystemAppend.java | 4 +- .../ITestAzureBlobFileSystemRename.java | 6 +- .../compat/common/HdfsCompatShellScope.java | 4 +- .../tools/util/TestProducerConsumer.java | 6 +- .../tools/dynamometer/ApplicationMaster.java | 4 +- .../hadoop/tools/dynamometer/Client.java | 4 +- 
.../tools/dynamometer/DynoInfraUtils.java | 4 +- .../dynamometer/TestDynamometerInfra.java | 4 +- .../audit/AuditReplayThread.java | 4 +- .../procedure/BalanceProcedureScheduler.java | 8 +- .../apache/hadoop/mapred/gridmix/Gridmix.java | 4 +- .../hadoop/mapred/gridmix/JobMonitor.java | 4 +- .../apache/hadoop/mapred/gridmix/LoadJob.java | 6 +- .../mapred/gridmix/ReplayJobFactory.java | 4 +- .../mapred/gridmix/SerialJobFactory.java | 4 +- .../hadoop/mapred/gridmix/Statistics.java | 4 +- .../mapred/gridmix/StressJobFactory.java | 4 +- .../service/ShutdownHook.java | 4 +- .../apache/hadoop/streaming/PipeMapRed.java | 6 +- .../distributedshell/ApplicationMaster.java | 4 +- .../DistributedShellBaseTest.java | 4 +- .../distributedshell/TestDSAppMaster.java | 4 +- .../distributedshell/TestDSTimelineV20.java | 6 +- .../TestDSWithMultipleNodeManager.java | 6 +- .../UnmanagedAMLauncher.java | 6 +- .../client/SystemServiceManagerImpl.java | 4 +- .../hadoop/yarn/service/ClientAMService.java | 4 +- .../service/utils/TestServiceApiUtil.java | 4 +- .../client/api/ContainerShellWebSocket.java | 4 +- .../api/async/impl/AMRMClientAsyncImpl.java | 6 +- .../api/async/impl/NMClientAsyncImpl.java | 6 +- .../apache/hadoop/yarn/client/cli/TopCLI.java | 6 +- .../yarn/client/ProtocolHATestBase.java | 4 +- ...TestFederationRMFailoverProxyProvider.java | 4 +- .../hadoop/yarn/client/TestGetGroups.java | 4 +- ...HedgingRequestRMFailoverProxyProvider.java | 4 +- .../hadoop/yarn/client/TestRMFailover.java | 6 +- ...gerAdministrationProtocolPBClientImpl.java | 4 +- .../api/async/impl/TestNMClientAsync.java | 4 +- .../yarn/client/api/impl/TestYarnClient.java | 4 +- .../hadoop/yarn/event/AsyncDispatcher.java | 6 +- .../hadoop/yarn/event/EventDispatcher.java | 4 +- .../yarn/util/AbstractLivelinessMonitor.java | 4 +- .../TestYarnUncaughtExceptionHandler.java | 8 +- .../TestAggregatedLogFormat.java | 4 +- .../yarn/util/TestProcfsBasedProcessTree.java | 4 +- .../server/timeline/LeveldbTimelineStore.java 
| 4 +- .../timeline/RollingLevelDBTimelineStore.java | 4 +- .../server/AMHeartbeatRequestHandler.java | 4 +- .../server/uam/UnmanagedAMPoolManager.java | 4 +- .../uam/TestUnmanagedApplicationManager.java | 4 +- .../server/nodemanager/ContainerExecutor.java | 4 +- .../yarn/server/nodemanager/NodeManager.java | 6 +- .../nodemanager/NodeResourceMonitorImpl.java | 4 +- .../nodemanager/NodeStatusUpdaterImpl.java | 6 +- .../WindowsSecureContainerExecutor.java | 4 +- .../container/ContainerImpl.java | 4 +- .../CGroupElasticMemoryController.java | 4 +- .../ResourceLocalizationService.java | 8 +- .../monitor/ContainersMonitorImpl.java | 6 +- .../TestLinuxContainerExecutor.java | 4 +- .../nodemanager/TestNodeManagerResync.java | 4 +- .../nodemanager/TestNodeStatusUpdater.java | 4 +- .../localizer/TestContainerLocalizer.java | 6 +- .../util/TestCgroupsLCEResourcesHandler.java | 4 +- .../resourcemanager/ResourceManager.java | 6 +- .../amlauncher/ApplicationMasterLauncher.java | 4 +- .../FederationStateStoreService.java | 4 +- .../metrics/TimelineServiceV1Publisher.java | 4 +- .../monitor/SchedulingMonitor.java | 4 +- .../recovery/ZKRMStateStore.java | 4 +- .../rmapp/attempt/RMAppAttemptImpl.java | 4 +- .../scheduler/AbstractYarnScheduler.java | 4 +- .../activities/ActivitiesManager.java | 4 +- .../scheduler/capacity/CapacityScheduler.java | 6 +- .../fair/AllocationFileLoaderService.java | 4 +- .../scheduler/fair/FSPreemptionThread.java | 4 +- .../scheduler/fair/FairScheduler.java | 4 +- .../scheduler/placement/MultiNodeSorter.java | 4 +- .../security/DelegationTokenRenewer.java | 6 +- .../server/resourcemanager/ACLsTestBase.java | 4 +- .../resourcemanager/TestApplicationACLs.java | 4 +- .../resourcemanager/TestClientRMService.java | 4 +- .../TestLeaderElectorService.java | 4 +- .../yarn/server/resourcemanager/TestRMHA.java | 4 +- .../recovery/TestFSRMStateStore.java | 4 +- ...TestZKRMStateStoreZKClientConnections.java | 4 +- .../resourcetracker/TestNMExpiry.java | 4 +- 
.../scheduler/TestQueueMetrics.java | 6 +- .../capacity/TestCapacityScheduler.java | 10 +- .../TestCapacitySchedulerAsyncScheduling.java | 4 +- .../TestCapacitySchedulerMultiNodes.java | 4 +- ...citySchedulerMultiNodesWithPreemption.java | 4 +- .../scheduler/capacity/TestLeafQueue.java | 8 +- .../fair/TestContinuousScheduling.java | 4 +- .../TestDominantResourceFairnessPolicy.java | 4 +- .../security/TestDelegationTokenRenewer.java | 4 +- .../TestRMWebServicesSchedulerActivities.java | 4 +- .../hadoop/yarn/server/router/Router.java | 4 +- .../clientrm/TestRouterClientRMService.java | 4 +- .../rmadmin/TestRouterRMAdminService.java | 4 +- .../router/webapp/TestRouterWebServices.java | 4 +- .../cosmosdb/CosmosDBDocumentStoreReader.java | 4 +- .../cosmosdb/CosmosDBDocumentStoreWriter.java | 4 +- 320 files changed, 978 insertions(+), 932 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 5d7077668e0f3..0ca13ad0d79e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -22,7 +22,7 @@ import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,7 +106,7 
@@ public Collection getChangedProperties( /** * A background thread to apply configuration changes. */ - private static class ReconfigurationThread extends HadoopThread { + private static class ReconfigurationThread extends SubjectInheritingThread { private ReconfigurableBase parent; ReconfigurationThread(ReconfigurableBase base) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java index b354314952e0f..43e7121dc26d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -108,7 +108,7 @@ void init() { */ private void initRefreshThread(boolean runImmediately) { if (refreshInterval > 0) { - refreshUsed = new HadoopThread(new RefreshThread(this, runImmediately), + refreshUsed = new SubjectInheritingThread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 20eed047b5136..0892db697d7a8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -30,7 
+30,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,7 +39,7 @@ */ @InterfaceAudience.Private public class DelegationTokenRenewer - extends HadoopThread { + extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory .getLogger(DelegationTokenRenewer.class); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 8f292dbfe3819..957cac07d972c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -81,7 +81,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.util.Preconditions; @@ -4088,7 +4088,7 @@ private interface StatisticsAggregator { static { STATS_DATA_REF_QUEUE = new ReferenceQueue<>(); // start a single daemon cleaner thread - STATS_DATA_CLEANER = new HadoopThread(new StatisticsDataReferenceCleaner()); + STATS_DATA_CLEANER = new SubjectInheritingThread(new StatisticsDataReferenceCleaner()); STATS_DATA_CLEANER. 
setName(StatisticsDataReferenceCleaner.class.getName()); STATS_DATA_CLEANER.setDaemon(true); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java index f117cbcfbfa20..86ec95c14e308 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ha; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import java.io.BufferedReader; @@ -51,7 +51,7 @@ enum StreamType { this.stream = stream; this.type = type; - thread = new HadoopThread(new Runnable() { + thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index d3de874d16752..b036caedfa3e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -54,7 +54,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGet; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.Tracer; import org.slf4j.Logger; @@ -408,7 +408,7 @@ public synchronized void setRpcResponse(Writable rpcResponse) { /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. 
Calls are multiplexed through this * socket: responses may be delivered out of order. */ - private class Connection extends HadoopThread { + private class Connection extends SubjectInheritingThread { private InetSocketAddress server; // server ip:port private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method @@ -449,7 +449,7 @@ private class Connection extends HadoopThread { Consumer removeMethod) { this.remoteId = remoteId; this.server = remoteId.getAddress(); - this.rpcRequestThread = new HadoopThread(new RpcRequestSender(), + this.rpcRequestThread = new SubjectInheritingThread(new RpcRequestSender(), "IPC Parameter Sending Thread for " + remoteId); this.rpcRequestThread.setDaemon(true); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 49d316a9f4678..5a60987a1b6e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -124,7 +124,7 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.tracing.Span; @@ -1473,7 +1473,7 @@ public String toString() { } /** Listens on the socket. 
Creates jobs for the handler threads*/ - private class Listener extends HadoopThread { + private class Listener extends SubjectInheritingThread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server @@ -1522,7 +1522,7 @@ void setIsAuxiliary() { this.isOnAuxiliaryPort = true; } - private class Reader extends HadoopThread { + private class Reader extends SubjectInheritingThread { final private BlockingQueue pendingConnections; private final Selector readSelector; @@ -1762,7 +1762,7 @@ Reader getReader() { } // Sends responses of RPC back to clients. - private class Responder extends HadoopThread { + private class Responder extends SubjectInheritingThread { private final Selector writeSelector; private int pending; // connections waiting to register @@ -3221,7 +3221,7 @@ private void internalQueueCall(Call call, boolean blocking) } /** Handles queued calls . */ - private class Handler extends HadoopThread { + private class Handler extends SubjectInheritingThread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index 60ebc96d1e9f3..ae2c890fa1c89 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -34,7 +34,7 @@ import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; @@ -49,7 +49,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final MetricsSink sink; private final MetricsFilter sourceFilter, recordFilter, metricFilter; private final SinkQueue queue; - private final HadoopThread sinkThread; + private final SubjectInheritingThread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; private final int periodMs, firstRetryDelay, retryCount; @@ -85,7 +85,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { "Dropped updates per sink", 0); qsize = registry.newGauge("Sink_"+ name + "Qsize", "Queue size", 0); - sinkThread = new HadoopThread() { + sinkThread = new SubjectInheritingThread() { @Override public void work() { publishMetricsFromQueue(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index f867370add712..1c552946a5ca5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -440,7 +440,7 @@ private void sendCallbackAndRemove(String caller, } @VisibleForTesting - final Thread watcherThread = new HadoopThread(new Runnable() { + final Thread watcherThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { if (LOG.isDebugEnabled()) { diff 
--git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 652d5001ab4e1..e0b0f74733e64 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -89,7 +89,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -930,7 +930,7 @@ private void executeAutoRenewalTask(final String userName, new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); t.setDaemon(true); t.setName("TGT Renewer for " + userName); return t; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 960f6aaf12f40..194042948bdf1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.functional.InvocationRaisingIOE; import org.slf4j.Logger; @@ -912,7 +912,7 @@ public boolean isRunning() { return running; } - private class ExpiredTokenRemover extends HadoopThread { + private class ExpiredTokenRemover extends SubjectInheritingThread { private long lastMasterKeyUpdate; private long lastTokenCacheCleanup; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index b9f11152203be..9400f9590b2c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -117,7 +117,7 @@ public void interrupted(IrqHandler.InterruptData interruptData) { //start an async shutdown thread with a timeout ServiceForcedShutdown shutdown = new ServiceForcedShutdown(service, shutdownTimeMillis); - Thread thread = new HadoopThread(shutdown); + Thread thread = new SubjectInheritingThread(shutdown); thread.setDaemon(true); thread.setName("Service Forced Shutdown"); thread.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java index 5b877618ef5e0..6c9157a8a7002 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java @@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,7 +75,7 @@ public AsyncDiskService(String[] volumes) { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new HadoopThread(threadGroup, r); + return new SubjectInheritingThread(threadGroup, r); } }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index e1d01f82c919b..c7b249444bc00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * This ExecutorService blocks the submission of new tasks when its queue is @@ -72,7 +72,7 @@ static ThreadFactory getNamedThreadFactory(final String prefix) { public Thread newThread(Runnable r) { final String name = prefix + "-pool" + poolNum + "-t" + threadNumber.getAndIncrement(); - return new HadoopThread(group, r, name); + return new SubjectInheritingThread(group, r, name); } }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java index 
eb789755eea22..db01b745f7114 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java @@ -27,59 +27,59 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.authentication.util.SubjectUtil; -/** A thread that has called {@link Thread#setDaemon(boolean) } with true. - * - * The runnable code must either be specified in the runnable parameter or - * in the override work() method. - * - * The subject propagation is already added in either case. +/** + * A thread that has called {@link Thread#setDaemon(boolean) } with true. + *

+ * The runnable code must either be specified in the runnable parameter or in + * the overridden work() method. + *

+ * See {@link org.apache.hadoop.util.concurrent.SubjectInheritingThread} for the Subject inheritance behavior this + * class adds. * - * */ -@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) + */ +@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" }) @InterfaceStability.Unstable public class Daemon extends Thread { Subject startSubject; - - @Override - public final void start() { - startSubject = SubjectUtil.current(); - super.start(); - } - + + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + /** * Override this instead of run() */ public void work() { - throw new IllegalArgumentException(""); + if (runnable != null) { + runnable.run(); + } } - + @Override public final void run() { - SubjectUtil.doAs(startSubject, new PrivilegedAction() { - - @Override - public Void run() { - if (runnable != null) { - runnable.run(); - } else { - work(); - } - return null; - } - - }); + SubjectUtil.doAs(startSubject, new PrivilegedAction() { + + @Override + public Void run() { + work(); + return null; + } + + }); } - + { - setDaemon(true); // always a daemon + setDaemon(true); // always a daemon } /** - * Provide a factory for named daemon threads, - * for use in ExecutorServices constructors + * Provide a factory for named daemon threads, for use in ExecutorServices + * constructors */ - @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) + @InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" }) public static class DaemonFactory extends Daemon implements ThreadFactory { @Override @@ -90,6 +90,7 @@ public Thread newThread(Runnable runnable) { } Runnable runnable = null; + /** Construct a daemon thread. */ public Daemon() { super(); @@ -97,23 +98,25 @@ public Daemon() { /** * Construct a daemon thread. + * * @param runnable runnable. 
*/ public Daemon(Runnable runnable) { super(runnable); this.runnable = runnable; - this.setName(((Object)runnable).toString()); + this.setName(((Object) runnable).toString()); } /** * Construct a daemon thread to be part of a specified thread group. - * @param group thread group. + * + * @param group thread group. * @param runnable runnable. */ public Daemon(ThreadGroup group, Runnable runnable) { super(group, runnable); this.runnable = runnable; - this.setName(((Object)runnable).toString()); + this.setName(((Object) runnable).toString()); } public Runnable getRunnable() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index a8aa6079c557f..b27fed0fa659f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * This class monitors the percentage of time the JVM is paused in GC within @@ -31,7 +31,7 @@ * hook which will be called whenever this percentage exceeds the specified * threshold. 
*/ -public class GcTimeMonitor extends HadoopThread { +public class GcTimeMonitor extends SubjectInheritingThread { private final long maxGcTimePercentage; private final long observationWindowMs, sleepIntervalMs; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index d025af725a955..49a17cc26d4f2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -36,7 +36,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.slf4j.Logger; @@ -1021,7 +1021,7 @@ private void runCommand() throws IOException { // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new HadoopThread() { + Thread errThread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java index de521001d2a79..07d8fb07ac1a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; 
-import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +85,7 @@ public final class ShutdownHookManager { static { try { Runtime.getRuntime().addShutdownHook( - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { if (MGR.shutdownInProgress.getAndSet(true)) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java deleted file mode 100644 index f68c3f6f4fff4..0000000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThread.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.util.concurrent; - -import java.security.PrivilegedAction; -import javax.security.auth.Subject; - -import org.apache.hadoop.security.authentication.util.SubjectUtil; - -/** - * Helper class to restore Subject propagation behavior after the JEP411/JEP486 - * changes - * - * Runnables can be specified normally, but the work() method has to be - * overridden instead of run() when subclassing. - */ -public class HadoopThread extends Thread { - - Subject startSubject; - Runnable hadoopTarget; - - public HadoopThread() { - super(); - } - - public HadoopThread(Runnable target) { - super(); - this.hadoopTarget = target; - } - - public HadoopThread(ThreadGroup group, Runnable target) { - // The target passed to Thread has no effect, we only pass it - // because there is no super(group) constructor. - super(group, target); - this.hadoopTarget = target; - } - - public HadoopThread(Runnable target, String name) { - super(name); - this.hadoopTarget = target; - } - - public HadoopThread(String name) { - super(name); - } - - public HadoopThread(ThreadGroup group, String name) { - super(group, name); - } - - public HadoopThread(ThreadGroup group, Runnable target, String name) { - super(group, name); - this.hadoopTarget = target; - } - - @Override - public final void start() { - startSubject = SubjectUtil.current(); - super.start(); - } - - /** - * Override this instead of run() - * - * It is really unfortunate that we have to introduce a new method and cannot reuse run(), - * but since run() is designed to be overridden, I couldn't find any other way to make this work. 
- * - */ - public void work() { - throw new IllegalArgumentException("No Runnable was specified and work() is not overriden"); - } - - @Override - public final void run() { - SubjectUtil.doAs(startSubject, new PrivilegedAction() { - - @Override - public Void run() { - if (hadoopTarget != null) { - hadoopTarget.run(); - } else { - work(); - } - return null; - } - - }); - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java new file mode 100644 index 0000000000000..91034922a72e2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util.concurrent; + +import java.security.PrivilegedAction; +import javax.security.auth.Subject; + +import org.apache.hadoop.security.authentication.util.SubjectUtil; + +/** + * Helper class to restore Subject propagation behavior of threads after the + * JEP411/JEP486 changes. + *

+ * Java propagates the current Subject to any new Threads in all versions up to + * Java 21. In Java 22-23 the Subject is only propagated if the SecurityManager + * is enabled, while in Java 24+ it is never propagated. + *

+ * Hadoop security heavily relies on the original behavior, as Subject is at the + * core of JAAS. This class wraps Thread. It overrides start() and saves the + * Subject of the current thread, and wraps the payload in a + * Subject.doAs()/callAs() call to restore it in the newly created Thread. + *

+ * When specifying a Runnable, this class is used in exactly the same way as + * Thread. + *

+ * {@link #run()} cannot be directly overridden, as that would also override the + * subject restoration logic. SubjectInheritingThread provides a {@link #work()} + * method instead, which is wrapped and invoked by its own final {@link #run()} + * method. + */ +public class SubjectInheritingThread extends Thread { + + private Subject startSubject; + // {@link Thread#target} is private, so we need our own + private Runnable hadoopTarget; + + /** + * Behaves similar to {@link Thread#Thread()} constructor, but the code to run + * must be specified by overriding the {@link #work()} instead of the {@link + * #run()} method. + */ + public SubjectInheritingThread() { + super(); + } + + /** + * Behaves similar to {@link Thread#Thread(Runnable)} constructor. + */ + public SubjectInheritingThread(Runnable target) { + super(); + this.hadoopTarget = target; + } + + /** + * Behaves similar to {@link Thread#Thread(ThreadGroup, Runnable)} constructor. + */ + public SubjectInheritingThread(ThreadGroup group, Runnable target) { + // The target passed to Thread has no effect, we only pass it + // because there is no super(group) constructor. + super(group, target); + this.hadoopTarget = target; + } + + /** + * Behaves similar to {@link Thread#Thread(Runnable, String)} constructor. + */ + public SubjectInheritingThread(Runnable target, String name) { + super(name); + this.hadoopTarget = target; + } + + /** + * Behaves similar to {@link Thread#Thread(String)} constructor. + */ + public SubjectInheritingThread(String name) { + super(name); + } + + /** + * Behaves similar to {@link Thread#Thread(ThreadGroup, String)} constructor. + */ + public SubjectInheritingThread(ThreadGroup group, String name) { + super(group, name); + } + + /** + * Behaves similar to {@link Thread#Thread(ThreadGroup, Runnable, String)} + * constructor.
+ */ + public SubjectInheritingThread(ThreadGroup group, Runnable target, String name) { + super(group, name); + this.hadoopTarget = target; + } + + /** + * Behaves similar to pre-Java 22 {@link Thread#start()}. It saves the current + * Subject before starting the new thread, which is then used as the Subject for + * the Runnable or the overridden work() method. + */ + @Override + public final void start() { + startSubject = SubjectUtil.current(); + super.start(); + } + + /** + * This is the equivalent of {@link Thread#run()}. Override this instead of + * {@link #run()} Subject will be propagated like in pre-Java 22 Thread. + */ + public void work() { + if (hadoopTarget != null) { + hadoopTarget.run(); + } + } + + /** + * This cannot be overridden in this class. Override the {@link #work()} method + * instead which behaves like pre-Java 22 {@link Thread#run()} + */ + @Override + public final void run() { + SubjectUtil.doAs(startSubject, new PrivilegedAction() { + + @Override + public Void run() { + work(); + return null; + } + + }); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 408b4ad66c3ea..0b60664b2e852 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -81,7 +81,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; @@ -2479,7 +2479,7 @@ public void testConcurrentAccesses() throws Exception { Configuration conf = new 
Configuration(); conf.addResource(fileResource); - class ConfigModifyThread extends HadoopThread { + class ConfigModifyThread extends SubjectInheritingThread { final private Configuration config; final private String prefix; @@ -2747,7 +2747,7 @@ private static Configuration checkCDATA(byte[] bytes) { @Test public void testConcurrentModificationDuringIteration() throws InterruptedException { Configuration configuration = new Configuration(); - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { while (true) { configuration.set(String.valueOf(Math.random()), String.valueOf(Math.random())); } @@ -2755,7 +2755,7 @@ public void testConcurrentModificationDuringIteration() throws InterruptedExcept AtomicBoolean exceptionOccurred = new AtomicBoolean(false); - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { while (true) { try { configuration.iterator(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index d3c3b88afa802..3f089a59d0f20 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -22,7 +22,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -285,7 +285,7 @@ public void testReconfigure() { public void testThread() throws ReconfigurationException { ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); assertTrue(dummy.getConf().get(PROP1).equals(VAL1)); - Thread dummyThread = new 
HadoopThread(dummy); + Thread dummyThread = new SubjectInheritingThread(dummy); dummyThread.start(); try { Thread.sleep(500); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index fae068aff9c9a..aa50f0d9f49ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -40,7 +40,7 @@ import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +74,7 @@ public void testStatisticsOperations() throws Exception { stats.incrementWriteOps(123); assertEquals(123, stats.getWriteOps()); - HadoopThread thread = new HadoopThread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override public void work() { stats.incrementWriteOps(1); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 9819c13ac185b..491387efa250c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -36,7 +36,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT; @@ -125,7 +125,7 @@ public void initialize(URI uri, Configuration conf) throws IOException { @Test public void testCacheEnabledWithInitializeForeverFS() throws Exception { final Configuration conf = new Configuration(); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { conf.set("fs.localfs1.impl", "org.apache.hadoop.fs." + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 27bcc0c108485..a1241a384d49a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -49,7 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * This class tests commands from Trash. @@ -725,7 +725,7 @@ public void testTrashEmptier() throws Exception { // Start Emptier in background Runnable emptier = trash.getEmptier(); - Thread emptierThread = new HadoopThread(emptier); + Thread emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -793,7 +793,7 @@ public void testTrashEmptierCleanDirNotInCheckpointDir() throws Exception { // Start Emptier in background. 
Runnable emptier = trash.getEmptier(); - Thread emptierThread = new HadoopThread(emptier); + Thread emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -1050,7 +1050,7 @@ private void verifyAuditableTrashEmptier(Trash trash, Thread emptierThread = null; try { Runnable emptier = trash.getEmptier(); - emptierThread = new HadoopThread(emptier); + emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); // Shutdown the emptier thread after a given time diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index c79ac1da6b77d..75a28b59767f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -46,7 +46,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -216,7 +216,7 @@ public LoadGenerator(Configuration conf) throws IOException, UnknownHostExceptio * A thread runs for the specified elapsed time if the time isn't zero. * Otherwise, it runs forever. 
*/ - private class DFSClientThread extends HadoopThread { + private class DFSClientThread extends SubjectInheritingThread { private int id; private long [] executionTime = new long[TOTAL_OP_TYPES]; private long [] totalNumOfOps = new long[TOTAL_OP_TYPES]; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java index a356b5e0fdf2b..9b92bd9e39755 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java @@ -18,7 +18,7 @@ package org.apache.hadoop.io; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -94,7 +94,7 @@ public void testMD5Hash() throws Exception { assertTrue(closeHash1.hashCode() != closeHash2.hashCode(), "hash collision"); - HadoopThread t1 = new HadoopThread() { + SubjectInheritingThread t1 = new SubjectInheritingThread() { @Override public void work() { for (int i = 0; i < 100; i++) { @@ -104,7 +104,7 @@ public void work() { } }; - HadoopThread t2 = new HadoopThread() { + SubjectInheritingThread t2 = new SubjectInheritingThread() { @Override public void work() { for (int i = 0; i < 100; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 04579099b3acb..7ca61a6358ce7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -27,7 +27,7 @@ import 
org.apache.hadoop.constants.ConfigConstants; import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; @@ -301,7 +301,7 @@ public void testTextText() throws CharacterCodingException { assertEquals(8, a.copyBytes().length); } - private class ConcurrentEncodeDecodeThread extends HadoopThread { + private class ConcurrentEncodeDecodeThread extends SubjectInheritingThread { public ConcurrentEncodeDecodeThread(String name) { super(name); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 8f96f338da973..9bccb52a6048c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -56,7 +56,7 @@ import org.apache.hadoop.test.StatUtils; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*; @@ -137,7 +137,7 @@ public void testMultiThreadedFstat() throws Exception { new AtomicReference(); List statters = new ArrayList(); for (int i = 0; i < 10; i++) { - HadoopThread statter = new HadoopThread() { + SubjectInheritingThread statter = new SubjectInheritingThread() { @Override public void work() { long et = Time.now() + 5000; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java index 140a850bc2ac6..badbbfa23d396 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -45,13 +45,13 @@ public class TestNativeIoInit { @Test @Timeout(value = 10) public void testDeadlockLinux() throws Exception { - Thread one = new HadoopThread() { + Thread one = new SubjectInheritingThread() { @Override public void work() { NativeIO.isAvailable(); } }; - Thread two = new HadoopThread() { + Thread two = new SubjectInheritingThread() { @Override public void work() { NativeIO.POSIX.isAvailable(); @@ -67,13 +67,13 @@ public void work() { @Timeout(value = 10) public void testDeadlockWindows() throws Exception { assumeTrue(Path.WINDOWS, "Expected windows"); - HadoopThread one = new HadoopThread() { + SubjectInheritingThread one = new SubjectInheritingThread() { @Override public void work() { NativeIO.isAvailable(); } }; - HadoopThread two = new HadoopThread() { + SubjectInheritingThread two = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java index 54260514f7c84..2533f0944dda7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java @@ 
-28,7 +28,7 @@ import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.util.ThreadUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; public class TestFailoverProxy { @@ -253,7 +253,7 @@ public String failsIfIdentifierDoesntMatch(String identifier) } - private static class ConcurrentMethodThread extends HadoopThread { + private static class ConcurrentMethodThread extends SubjectInheritingThread { private UnreliableInterface unreliable; public String result; @@ -328,7 +328,7 @@ public void testFailoverBetweenMultipleStandbys() RetryPolicies.failoverOnNetworkException( RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000)); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java index 932af5d3dd379..fe98abce97787 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java @@ -30,7 +30,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGetFuture; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -74,7 +74,7 @@ public void setupConf() { Client.setAsynchronousMode(true); } - static class AsyncCaller extends HadoopThread { + static class AsyncCaller extends SubjectInheritingThread { private Client 
client; private InetSocketAddress server; private int count; @@ -155,7 +155,7 @@ void assertReturnValues(long timeout, TimeUnit unit) * For testing the asynchronous calls of the RPC client * implemented with CompletableFuture. */ - static class AsyncCompletableFutureCaller extends HadoopThread { + static class AsyncCompletableFutureCaller extends SubjectInheritingThread { private final Client client; private final InetSocketAddress server; private final int count; @@ -205,7 +205,7 @@ public void assertReturnValues() } } - static class AsyncLimitlCaller extends HadoopThread { + static class AsyncLimitlCaller extends SubjectInheritingThread { private Client client; private InetSocketAddress server; private int count; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java index 57bdd2c7047b5..044c60fe07450 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java @@ -40,7 +40,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -152,7 +152,7 @@ public void assertCanTake(CallQueueManager cq, int numberOfTakes, int takeAttempts) throws InterruptedException { Taker taker = new Taker(cq, takeAttempts, -1); - Thread t = new HadoopThread(taker); + Thread t = new SubjectInheritingThread(taker); t.start(); t.join(100); @@ -165,7 +165,7 @@ public void assertCanPut(CallQueueManager cq, int numberOfPuts, int putAttempts) throws 
InterruptedException { Putter putter = new Putter(cq, putAttempts, -1); - Thread t = new HadoopThread(putter); + Thread t = new SubjectInheritingThread(putter); t.start(); t.join(100); @@ -278,7 +278,7 @@ public void testSwapUnderContention() throws InterruptedException { // Create putters and takers for (int i=0; i < 1000; i++) { Putter p = new Putter(manager, -1, -1); - Thread pt = new HadoopThread(p); + Thread pt = new SubjectInheritingThread(p); producers.add(p); threads.put(p, pt); @@ -287,7 +287,7 @@ public void testSwapUnderContention() throws InterruptedException { for (int i=0; i < 100; i++) { Taker t = new Taker(manager, -1, -1); - Thread tt = new HadoopThread(t); + Thread tt = new SubjectInheritingThread(t); consumers.add(t); threads.put(t, tt); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java index eb4c496e7c786..107a9f8587bf5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java @@ -50,7 +50,7 @@ import java.util.List; import java.util.concurrent.BlockingQueue; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; @@ -685,7 +685,7 @@ public void assertCanTake(BlockingQueue cq, int numberOfTakes, CountDownLatch latch = new CountDownLatch(numberOfTakes); Taker taker = new Taker(cq, takeAttempts, "default", latch); - Thread t = new HadoopThread(taker); + Thread t = new SubjectInheritingThread(taker); t.start(); latch.await(); @@ -699,7 +699,7 @@ public void assertCanPut(BlockingQueue 
cq, int numberOfPuts, CountDownLatch latch = new CountDownLatch(numberOfPuts); Putter putter = new Putter(cq, putAttempts, null, latch); - Thread t = new HadoopThread(putter); + Thread t = new SubjectInheritingThread(putter); t.start(); latch.await(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index d1882b203b045..b9a45fe28b825 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -103,7 +103,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -253,7 +253,7 @@ public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, } } - private static class SerialCaller extends HadoopThread { + private static class SerialCaller extends SubjectInheritingThread { private Client client; private InetSocketAddress server; private int count; @@ -997,7 +997,7 @@ private void checkBlocking(int readers, int readerQ, int callQ) throws Exception // instantiate the threads, will start in batches Thread[] threads = new Thread[clients]; for (int i=0; i future = new FutureTask(clientCallable); - Thread clientThread = new HadoopThread(future); + Thread clientThread = new SubjectInheritingThread(future); clientThread.start(); server.awaitInvocation(); @@ -147,7 +147,7 @@ public void testDeferredException() throws IOException, InterruptedException, new ClientCallable(serverAddress, conf, requestBytes); FutureTask future = new FutureTask(clientCallable); - Thread clientThread = new 
HadoopThread(future); + Thread clientThread = new SubjectInheritingThread(future); clientThread.start(); server.awaitInvocation(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java index 74529bd89fe61..50202214fe559 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java @@ -38,7 +38,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.SocksSocketFactory; import org.apache.hadoop.net.StandardSocketFactory; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -64,7 +64,7 @@ public class TestSocketFactory { private void startTestServer() throws Exception { // start simple tcp server. 
serverRunnable = new ServerRunnable(); - serverThread = new HadoopThread(serverRunnable); + serverThread = new SubjectInheritingThread(serverRunnable); serverThread.start(); final long timeout = System.currentTimeMillis() + START_STOP_TIMEOUT_SEC * 1000; while (!serverRunnable.isReady()) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java index 9efa9ff73c7f7..69a732686dfae 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java @@ -21,7 +21,7 @@ import java.util.ConcurrentModificationException; import java.util.concurrent.CountDownLatch; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,7 +80,7 @@ private void testEmptyBlocking(int awhile) throws Exception { final SinkQueue q = new SinkQueue(2); final Runnable trigger = mock(Runnable.class); // try consuming emtpy equeue and blocking - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { assertEquals(1, (int) q.dequeue(), "element"); @@ -256,7 +256,7 @@ private SinkQueue newSleepingConsumerQueue(int capacity, q.enqueue(i); } final CountDownLatch barrier = new CountDownLatch(1); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { Thread.sleep(10); // causes failure without barrier diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java index d1bb32d64f15a..0e5df58ded530 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java @@ -40,7 +40,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.Quantile; import org.apache.hadoop.thirdparty.com.google.common.math.Stats; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -210,7 +210,7 @@ interface TestProtocol { rates.add("metric" + i, 0); } - HadoopThread[] threads = new HadoopThread[n]; + SubjectInheritingThread[] threads = new SubjectInheritingThread[n]; final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length); final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1); final CountDownLatch secondAddsFinished = @@ -221,7 +221,7 @@ interface TestProtocol { final Random sleepRandom = new Random(seed); for (int tIdx = 0; tIdx < threads.length; tIdx++) { final int threadIdx = tIdx; - threads[threadIdx] = new HadoopThread() { + threads[threadIdx] = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index 5872cf004fe71..7429e7e525a9a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -39,7 +39,7 @@ import 
org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Timeout; import java.util.ArrayList; @@ -297,7 +297,7 @@ private static void updateThreadsAndWait(List threads, } } - static class TestThread extends HadoopThread { + static class TestThread extends SubjectInheritingThread { private volatile boolean exit = false; private boolean exited = false; @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index fb917b822e375..28b9bdaa26c89 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -47,7 +47,7 @@ import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -457,7 +457,7 @@ void testClientServer1(final Class writeStrategyClass, new ArrayBlockingQueue(2); final DomainSocket serv = (preConnectedSockets != null) ? 
null : DomainSocket.bindAndListen(TEST_PATH); - Thread serverThread = new HadoopThread() { + Thread serverThread = new SubjectInheritingThread() { public void work(){ // Run server DomainSocket conn = null; @@ -485,7 +485,7 @@ public void work(){ }; serverThread.start(); - HadoopThread clientThread = new HadoopThread() { + SubjectInheritingThread clientThread = new SubjectInheritingThread() { public void work(){ try { DomainSocket client = preConnectedSockets != null ? @@ -626,7 +626,7 @@ public void testFdPassing() throws Exception { for (int i = 0; i < passedFiles.length; i++) { passedFds[i] = passedFiles[i].getInputStream().getFD(); } - Thread serverThread = new HadoopThread() { + Thread serverThread = new SubjectInheritingThread() { public void work(){ // Run server DomainSocket conn = null; @@ -649,7 +649,7 @@ public void work(){ }; serverThread.start(); - Thread clientThread = new HadoopThread() { + Thread clientThread = new SubjectInheritingThread() { public void work(){ try { DomainSocket client = DomainSocket.connect(TEST_PATH); @@ -783,7 +783,7 @@ public void run() { } } }; - Thread readerThread = new HadoopThread(reader); + Thread readerThread = new SubjectInheritingThread(reader); readerThread.start(); socks[0].getOutputStream().write(1); socks[0].getOutputStream().write(2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java index 6d7a5a163fc49..58fe3f44ecf90 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java @@ -32,7 +32,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -129,7 +129,7 @@ public void testStress() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new HadoopThread(new Runnable() { + final Thread adderThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -156,7 +156,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new HadoopThread(new Runnable() { + final Thread removerThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { final Random random = new Random(); @@ -200,7 +200,7 @@ public void testStressInterruption() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new HadoopThread(new Runnable() { + final Thread adderThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -228,7 +228,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new HadoopThread(new Runnable() { + final Thread removerThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { final Random random = new Random(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java index 69f5ba65af560..136e28b56d4a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -43,7 +43,7 @@ public void testClearAuthorizationHeader() { public void testThreadLocalIsolation() throws Exception { byte[] mainHeader = "main-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(mainHeader); - HadoopThread t = new HadoopThread(() -> { + SubjectInheritingThread t = new SubjectInheritingThread(() -> { Assertions.assertNull(AuthorizationContext.getCurrentAuthorizationHeader()); byte[] threadHeader = "other-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(threadHeader); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index cb2687db17f3d..74f00f9d8e91f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -31,7 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -407,9 +407,9 @@ public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception { FakeGroupMapping.clearBlackList(); FakeGroupMapping.setGetGroupsDelayMs(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new HadoopThread() { + threads.add(new SubjectInheritingThread() { public void work() { try { assertEquals(2, groups.getGroups("me").size()); @@ -452,9 +452,9 @@ public void 
testOnlyOneRequestWhenExpiredEntryExists() throws Exception { timer.advance(400 * 1000); Thread.sleep(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new HadoopThread() { + threads.add(new SubjectInheritingThread() { public void work() { try { assertEquals(2, groups.getGroups("me").size()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java index 464a1ae4878bd..15651cbbece58 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java @@ -59,7 +59,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.JavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -414,7 +414,7 @@ public void testLdapConnectionTimeout() // Below we create a LDAP server which will accept a client request; // but it will never reply to the bind (connect) request. // Client of this LDAP server is expected to get a connection timeout. - final Thread ldapServer = new HadoopThread(new Runnable() { + final Thread ldapServer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -469,7 +469,7 @@ public void testLdapReadTimeout() throws IOException, InterruptedException { // authenticate it successfully; but it will never reply to the following // query request. // Client of this LDAP server is expected to get a read timeout. 
- final Thread ldapServer = new HadoopThread(new Runnable() { + final Thread ldapServer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 125c9e8b1dc80..626021b18c6b0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -1024,7 +1024,7 @@ public Void run() throws Exception { }}); } - static class GetTokenThread extends HadoopThread { + static class GetTokenThread extends SubjectInheritingThread { boolean runThread = true; volatile ConcurrentModificationException cme = null; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java index 0121d44678e6e..67f6902e301bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java @@ -25,7 +25,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceStateChangeListener; import org.apache.hadoop.service.ServiceStateException; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -405,7 +405,7 @@ private AsyncSelfTerminatingService(int timeout) { @Override protected void serviceStart() throws Exception { - new HadoopThread(this).start(); + new SubjectInheritingThread(this).start(); super.serviceStart(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java index 6d508c668ca0b..6f917467f2b34 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,7 +59,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - Thread thread = new HadoopThread(this); + Thread thread = new SubjectInheritingThread(this); thread.setName(getName()); thread.start(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java index fb2910c1813d0..a8dbd395b82f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java @@ -21,7 +21,7 @@ import java.util.Set; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -176,7 +176,7 @@ public Iterable getTestThreads() { * A thread that can be added to a test context, and properly * passes exceptions through. */ - public static abstract class TestingThread extends HadoopThread { + public static abstract class TestingThread extends SubjectInheritingThread { protected final TestContext ctx; protected boolean stopped; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java index ae3a2ec46c152..1b6ddcc263afe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java @@ -23,7 +23,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -60,7 +60,7 @@ public Deadlock() { } } - class DeadlockThread extends HadoopThread { + class DeadlockThread extends SubjectInheritingThread { private Lock lock1 = null; private Lock lock2 = null; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java index eebb8c334798d..54dade7fd39b5 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java @@ -20,7 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -57,7 +57,7 @@ public void testMultipleThread() throws Exception { AutoCloseableLock lock = new AutoCloseableLock(); lock.acquire(); assertTrue(lock.isLocked()); - HadoopThread competingThread = new HadoopThread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override public void work() { assertTrue(lock.isLocked()); @@ -82,7 +82,7 @@ public void testTryWithResourceSyntax() throws Exception { try(AutoCloseableLock localLock = lock.acquire()) { assertEquals(localLock, lock); assertTrue(lock.isLocked()); - HadoopThread competingThread = new HadoopThread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override public void work() { assertTrue(lock.isLocked()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java index 851776c770795..0ff2bdeafc388 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java @@ -26,7 +26,7 @@ import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; 
import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -53,7 +53,7 @@ public void testMultipleThread(TestInfo testInfo) throws Exception { InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300); lock.lock(); try { - HadoopThread competingThread = new HadoopThread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override public void work() { assertFalse(lock.tryLock()); @@ -90,7 +90,7 @@ public void unlock() { AutoCloseableLock acl = new AutoCloseableLock(lock); try (AutoCloseable localLock = acl.acquire()) { assertEquals(acl, localLock); - HadoopThread competingThread = new HadoopThread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override public void work() { assertNotEquals(Thread.currentThread(), lockThread.get()); @@ -254,7 +254,7 @@ void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) { private Thread lockUnlockThread(Lock lock) throws InterruptedException { CountDownLatch countDownLatch = new CountDownLatch(1); - Thread t = new HadoopThread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { assertFalse(lock.tryLock()); countDownLatch.countDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java index 085ac661e4b36..aaf75234b0ecc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java @@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; @@ -69,7 +69,7 @@ public void release() { final AutoCloseableLock readLock = new AutoCloseableLock( readWriteLock.readLock()); try (AutoCloseableLock lock = writeLock.acquire()) { - Thread competingWriteThread = new HadoopThread() { + Thread competingWriteThread = new SubjectInheritingThread() { @Override public void work() { assertFalse(writeLock.tryLock()); @@ -77,7 +77,7 @@ public void work() { }; competingWriteThread.start(); competingWriteThread.join(); - Thread competingReadThread = new HadoopThread() { + Thread competingReadThread = new SubjectInheritingThread() { @Override public void work() { assertFalse(readLock.tryLock()); @@ -105,7 +105,7 @@ public void testReadLock(TestInfo testInfo) throws Exception { final AutoCloseableLock writeLock = new AutoCloseableLock( readWriteLock.writeLock()); try (AutoCloseableLock lock = readLock.acquire()) { - HadoopThread competingReadThread = new HadoopThread() { + SubjectInheritingThread competingReadThread = new SubjectInheritingThread() { @Override public void work() { assertTrue(readLock.tryLock()); @@ -114,7 +114,7 @@ public void work() { }; competingReadThread.start(); competingReadThread.join(); - HadoopThread competingWriteThread = new HadoopThread() { + SubjectInheritingThread competingWriteThread = new SubjectInheritingThread() { @Override public void work() { assertFalse(writeLock.tryLock()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java index a8fafe7c0c22c..60ebc02398e93 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java @@ -29,7 +29,7 @@ import java.util.zip.CRC32; import 
java.util.zip.Checksum; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -317,7 +317,7 @@ private static BenchResult doBench(Class clazz, final int numThreads, final byte[] bytes, final int size) throws Exception { - final HadoopThread[] threads = new HadoopThread[numThreads]; + final SubjectInheritingThread[] threads = new SubjectInheritingThread[numThreads]; final BenchResult[] results = new BenchResult[threads.length]; { @@ -327,7 +327,7 @@ private static BenchResult doBench(Class clazz, for(int i = 0; i < threads.length; i++) { final int index = i; - threads[i] = new HadoopThread() { + threads[i] = new SubjectInheritingThread() { final Checksum crc = ctor.newInstance(); @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index f106dac28f55e..89cc5eefa86cd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -31,7 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -71,9 +71,9 @@ private void doTestCache() { @Test public void testThreadSafe() throws Exception { - HadoopThread[] th = new HadoopThread[32]; + SubjectInheritingThread[] th = new SubjectInheritingThread[32]; for (int i=0; i() { public Void call() throws InterruptedException { - HadoopThread t = new 
HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { public void work() { childSubject = SubjectUtil.current(); } @@ -67,7 +67,7 @@ public void run() { } }; - HadoopThread t = new HadoopThread(r); + SubjectInheritingThread t = new SubjectInheritingThread(r); t.start(); t.join(1000); return (Void) null; diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/KMSBenchmark.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/KMSBenchmark.java index 9db71535511c2..118e63ca13797 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/KMSBenchmark.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/KMSBenchmark.java @@ -28,7 +28,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -326,7 +326,7 @@ void printStats() { /** * One of the threads that perform stats operations. 
*/ - private class StatsDaemon extends HadoopThread { + private class StatsDaemon extends SubjectInheritingThread { private final int daemonId; private int opsPerThread; private String arg1; // argument passed to executeOp() diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index 113e5bf84c4c1..ed6c906cda90d 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -29,7 +29,7 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xbill.DNS.CNAMERecord; @@ -175,7 +175,7 @@ public RegistryDNS(String name) { @Override public Thread newThread(Runnable r) { - return new HadoopThread(r, + return new SubjectInheritingThread(r, "RegistryDNS " + counter.getAndIncrement()); } diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java index 64dd6116115b2..e42edd63815f6 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java @@ -37,7 +37,7 @@ import org.apache.hadoop.registry.client.types.RegistryPathStatus; 
import org.apache.hadoop.registry.client.types.ServiceRecord; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; @@ -116,7 +116,7 @@ public RegistryAdminService(String name, @Override public Thread newThread(Runnable r) { - return new HadoopThread(r, + return new SubjectInheritingThread(r, "RegistryAdminService " + counter.getAndIncrement()); } }); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java index 154eacb5cd3ab..6cbff92eab05d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -323,12 +323,12 @@ static void setDisabledProbeThreadForTest( @VisibleForTesting void startProbeScheduler() { probeDeadNodesSchedulerThr = - new HadoopThread(new ProbeScheduler(this, ProbeType.CHECK_DEAD)); + new SubjectInheritingThread(new ProbeScheduler(this, ProbeType.CHECK_DEAD)); probeDeadNodesSchedulerThr.setDaemon(true); probeDeadNodesSchedulerThr.start(); probeSuspectNodesSchedulerThr = - new HadoopThread(new ProbeScheduler(this, ProbeType.CHECK_SUSPECT)); + new SubjectInheritingThread(new ProbeScheduler(this, ProbeType.CHECK_SUSPECT)); 
probeSuspectNodesSchedulerThr.setDaemon(true); probeSuspectNodesSchedulerThr.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java index 6be57477976ec..91bbdefaeae40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java @@ -42,7 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -144,7 +144,7 @@ public DatanodeAdminProperties[] call() throws Exception { } }); - Thread thread = new HadoopThread(futureTask); + Thread thread = new SubjectInheritingThread(futureTask); thread.start(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java index 87e8e93eacbbd..31b680774f762 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java @@ -45,7 +45,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -678,7 +678,7 @@ public long[] answer(InvocationOnMock invocation) throws Throwable { assertEquals(1, stats[0]); assertEquals(1, counter.get()); - Thread t = new HadoopThread() { + Thread t = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java index 80d2859684fa5..f07d67f0c0b6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java @@ -25,7 +25,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -227,7 +227,7 @@ static void waitForAll(List> furtures) throws Exception { } } - static class AllocatorThread extends HadoopThread { + static class AllocatorThread extends SubjectInheritingThread { private final ByteArrayManager bam; private final int arrayLength; private byte[] array; @@ -334,7 +334,7 @@ public void testByteArrayManager() throws Exception { } final List exceptions = new ArrayList(); - final Thread randomRecycler = new HadoopThread() { + final Thread randomRecycler = new SubjectInheritingThread() { @Override public void work() { LOG.info("randomRecycler start"); @@ -525,7 +525,7 @@ public void run() { Thread start(int n) { this.n = n; - final Thread t = new 
HadoopThread(this); + final Thread t = new SubjectInheritingThread(this); t.start(); return t; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java index d19e23d85d1cb..1298252740eb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java @@ -22,7 +22,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +48,7 @@ public AsyncDataService() { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new HadoopThread(threadGroup, r); + return new SubjectInheritingThread(threadGroup, r); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java index 48aae61bac06a..8a0242678e5ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java @@ -29,7 +29,7 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; // TODO: convert this to Junit public class TestUdpServer { @@ -69,14 +69,14 @@ public static void main(String[] args) throws InterruptedException { 
//testDump(); } - static class Runtest1 extends HadoopThread { + static class Runtest1 extends SubjectInheritingThread { @Override public void work() { testGetportMount(); } } - static class Runtest2 extends HadoopThread { + static class Runtest2 extends SubjectInheritingThread { @Override public void work() { testDump(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java index 3a9a17155416c..aaba9992a257c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +98,7 @@ private synchronized void updateSubclusterMapping() { if (subclusterMapping == null || (monotonicNow() - lastUpdated) > minUpdateTime) { // Fetch the mapping asynchronously - Thread updater = new HadoopThread(new Runnable() { + Thread updater = new SubjectInheritingThread(new Runnable() { @Override public void run() { final MembershipStore membershipStore = getMembershipStore(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 
414936f8d9729..eb2a466081908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.eclipse.jetty.util.ajax.JSON; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -459,7 +459,7 @@ public void run() { /** * Thread that creates connections asynchronously. */ - static class ConnectionCreator extends HadoopThread { + static class ConnectionCreator extends SubjectInheritingThread { /** If the creator is running. */ private boolean running = true; /** Queue to push work to. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index edf108bad8393..a7c1fb6caabb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -25,14 +25,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for updating 
mount table cache on all the router. */ -public class MountTableRefresherThread extends HadoopThread { +public class MountTableRefresherThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(MountTableRefresherThread.class); private boolean success; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 418e209a4fce5..40697fc14a4c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -59,7 +59,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -392,7 +392,7 @@ protected void serviceStop() throws Exception { * Shutdown the router. 
*/ public void shutDown() { - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { Router.this.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index fb3bd83a521a7..96c5dd13d9d28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +64,7 @@ public RouterHeartbeatService(Router router) { * Trigger the update of the Router state asynchronously. 
*/ protected void updateStateAsync() { - Thread thread = new HadoopThread(this::updateStateStore, "Router Heartbeat Async"); + Thread thread = new SubjectInheritingThread(this::updateStateStore, "Router Heartbeat Async"); thread.setDaemon(true); thread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index fd1059d168a8b..e499e42b3c455 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -213,7 +213,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.checkerframework.checker.nullness.qual.NonNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -2508,7 +2508,7 @@ private static class AsyncThreadFactory implements ThreadFactory { @Override public Thread newThread(@NonNull Runnable r) { - Thread thread = new HadoopThread(r, namePrefix + threadNumber.getAndIncrement()); + Thread thread = new SubjectInheritingThread(r, namePrefix + threadNumber.getAndIncrement()); thread.setDaemon(true); return thread; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index d12d9dfcc9c19..2eaa71076d03b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -134,7 +134,7 @@ public void testConcurrentRefreshRequests() throws InterruptedException { // Spawn 100 concurrent refresh requests Thread[] threads = new Thread[100]; for (int i = 0; i < 100; i++) { - threads[i] = new HadoopThread(() -> + threads[i] = new SubjectInheritingThread(() -> client.refreshFairnessPolicyController(routerContext.getConf())); } @@ -183,7 +183,7 @@ public void testRefreshStaticChangeHandlers() throws Exception { final int newNs1Permits = 4; conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns0", newNs0Permits); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns1", newNs1Permits); - Thread threadRefreshController = new HadoopThread(() -> client. + Thread threadRefreshController = new SubjectInheritingThread(() -> client. 
refreshFairnessPolicyController(routerContext.getConf())); threadRefreshController.start(); threadRefreshController.join(); @@ -219,7 +219,7 @@ private List makeDummyInvocations(RouterRpcClient client, final int nThr RemoteMethod dummyMethod = Mockito.mock(RemoteMethod.class); List threadAcquirePermits = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { - Thread threadAcquirePermit = new HadoopThread(() -> { + Thread threadAcquirePermit = new SubjectInheritingThread(() -> { try { client.invokeSingle(namespace, dummyMethod); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index f7345c525e15b..0036d9044d064 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -45,7 +45,7 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -319,7 +319,7 @@ public void testCounter() throws Exception { int expectedSchedulerCount = rpcServer.getSchedulerJobCount() + 1; AtomicInteger maxSchedulerCount = new AtomicInteger(); AtomicBoolean watch = new AtomicBoolean(true); - Thread watcher = new HadoopThread(() -> { + Thread watcher = new SubjectInheritingThread(() -> { while (watch.get()) { int schedulerCount = 
rpcServer.getSchedulerJobCount(); if (schedulerCount > maxSchedulerCount.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 381eabd791e2c..648104a01e9a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -152,7 +152,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * The the RPC interface of the {@link Router} implemented by @@ -2394,7 +2394,7 @@ public void testCallerContextNotResetByAsyncHandler() throws IOException { String dirPath = "/test"; // The reason we start this child thread is that CallContext use InheritableThreadLocal. - HadoopThread t1 = new HadoopThread(() -> { + SubjectInheritingThread t1 = new SubjectInheritingThread(() -> { // Set flag async:true. 
CallerContext.setCurrent( new CallerContext.Builder("async:true").build()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java index 3a46b04b9420f..51fd4486a6e47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java @@ -24,7 +24,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * SyncClass implements BaseClass, providing a synchronous @@ -188,7 +188,7 @@ public String timeConsumingMethod(int input) { private ExecutorService getExecutorService() { return Executors.newFixedThreadPool(2, r -> { - HadoopThread t = new HadoopThread(r); + SubjectInheritingThread t = new SubjectInheritingThread(r); t.setDaemon(true); return t; }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 13e6364774fdc..1d680702cdcb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -130,7 +130,7 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; @@ -5641,7 +5641,7 @@ public int getBlockOpQueueLength() { return blockReportThread.queue.size(); } - private class BlockReportProcessingThread extends HadoopThread { + private class BlockReportProcessingThread extends SubjectInheritingThread { private long lastFull = 0; private final BlockingQueue queue; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 496e2a2f4d933..066d6d9a5c2bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,7 +66,7 @@ * starts up, and at configurable intervals afterwards. 
*/ @InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationMonitor extends HadoopThread implements Closeable { +public class CacheReplicationMonitor extends SubjectInheritingThread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(CacheReplicationMonitor.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index c8d793b745049..4526d14a73b4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Timer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -153,7 +153,7 @@ public void checkAndUpdateReportIfNecessary() { public void updateSlowDiskReportAsync(long now) { if (isUpdateInProgress.compareAndSet(false, true)) { lastUpdateTime = now; - new HadoopThread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index c3269b923fb1e..2b53d3feb71a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -53,7 +53,7 
@@ import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -849,7 +849,7 @@ private void deleteAsync(File curDir) throws IOException { deleteDir(curTmp); } rename(curDir, curTmp); - new HadoopThread("Async Delete Current.tmp") { + new SubjectInheritingThread("Async Delete Current.tmp") { public void work() { try { deleteDir(curTmp); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 31acda1f703a6..2ceca34edd954 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -75,7 +75,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.apache.hadoop.classification.VisibleForTesting; @@ -600,7 +600,7 @@ void start() { //Thread is started already return; } - bpThread = new HadoopThread(this); + bpThread = new SubjectInheritingThread(this); bpThread.setDaemon(true); // needed for JUnit testing if (lifelineSender != null) { @@ -1079,7 +1079,7 @@ public void run() { } public void start() { - lifelineThread = new HadoopThread(this, + lifelineThread = new SubjectInheritingThread(this, formatThreadName("lifeline", lifelineNnAddr)); lifelineThread.setDaemon(true); 
lifelineThread.setUncaughtExceptionHandler( @@ -1385,7 +1385,7 @@ public long monotonicNow() { /** * CommandProcessingThread that process commands asynchronously. */ - class CommandProcessingThread extends HadoopThread { + class CommandProcessingThread extends SubjectInheritingThread { private final BPServiceActor actor; private final BlockingQueue queue; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 4fde1d992d1c2..3a1b1e07f3682 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -262,7 +262,7 @@ import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Tracer; import org.eclipse.jetty.util.ajax.JSON; @@ -3856,7 +3856,7 @@ public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException // Asynchronously start the shutdown process so that the rpc response can be // sent back. - Thread shutdownThread = new HadoopThread("Async datanode shutdown thread") { + Thread shutdownThread = new SubjectInheritingThread("Async datanode shutdown thread") { @Override public void work() { if (!shutdownForUpgrade) { // Delay the shutdown a bit if not doing for restart. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index 35230be5aebc1..77e30b85675e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,7 +53,7 @@ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. *

They are all managed by the DataNode's BlockScanner. */ -public class VolumeScanner extends HadoopThread { +public class VolumeScanner extends SubjectInheritingThread { public static final Logger LOG = LoggerFactory.getLogger(VolumeScanner.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index 2aa5319c6e8f7..f6caea96346fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -110,7 +110,7 @@ public Thread newThread(Runnable r) { synchronized (this) { thisIndex = counter++; } - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); t.setName("Async disk worker #" + thisIndex + " for volume " + volume); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index a9ecdd46bcb8e..a3d54865de048 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -49,7 +49,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; class FsVolumeList { private final CopyOnWriteArrayList volumes = @@ -261,7 +261,7 @@ void getAllVolumesMap(final String bpid, new ConcurrentHashMap(); List replicaAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new HadoopThread() { + Thread t = new SubjectInheritingThread() { public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + @@ -508,7 +508,7 @@ void addBlockPool(final String bpid, final Configuration conf) throws IOExceptio new ConcurrentHashMap(); List blockPoolAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new HadoopThread() { + Thread t = new SubjectInheritingThread() { public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Scanning block pool " + bpid + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java index e295db58d67b7..02b78b24ca2cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.io.IOUtils; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -83,7 +83,7 @@ private void addExecutorForVolume(final String storageId) { @Override public Thread newThread(Runnable r) { - Thread t = new HadoopThread(threadGroup, r); + Thread t = new SubjectInheritingThread(threadGroup, r); t.setName("Async RamDisk lazy persist worker " + " for volume with id " + storageId); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 8106334307630..8316a4c52293a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -79,7 +79,7 @@ private boolean isSyncThreadAlive() { private void startSyncThread() { synchronized(syncThreadLock) { if (!isSyncThreadAlive()) { - syncThread = new HadoopThread(this, this.getClass().getSimpleName()); + syncThread = new SubjectInheritingThread(this, this.getClass().getSimpleName()); syncThread.start(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 40ff0829730f0..27fd774c210f1 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -76,7 +76,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Preconditions; @@ -1247,7 +1247,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { StorageDirectory sd = it.next(); FSImageSaver saver = new FSImageSaver(ctx, sd, nnf); - Thread saveThread = new HadoopThread(saver, saver.toString()); + Thread saveThread = new SubjectInheritingThread(saver, saver.toString()); saveThreads.add(saveThread); saveThread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index d69ca71759f2c..c84f3266dcfca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -75,7 +75,7 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -185,7 +185,7 @@ public LoaderContext getLoaderContext() { * Thread to compute the MD5 of a 
file as this can be in parallel while * loading the image without interfering much. */ - private static class DigestThread extends HadoopThread { + private static class DigestThread extends SubjectInheritingThread { /** * Exception thrown when computing the digest if it cannot be calculated. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 47bb47466d6bc..fe66a7dbbbbbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -103,7 +103,7 @@ import org.apache.hadoop.util.GcTimeMonitor.Builder; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.util.Timer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1076,7 +1076,7 @@ public FileSystem run() throws IOException { return dfs; } }); - this.emptier = new HadoopThread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); + this.emptier = new SubjectInheritingThread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index f4c0252a88329..472f1536526cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -37,7 +37,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; 
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.Timer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -476,7 +476,7 @@ void sleep(long sleepTimeMillis) throws InterruptedException { * The thread which does the actual work of tailing edits journals and * applying the transactions to the FSNS. */ - private class EditLogTailerThread extends HadoopThread { + private class EditLogTailerThread extends SubjectInheritingThread { private volatile boolean shouldRun = true; private EditLogTailerThread() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index d310db425abe4..adc4caa64de5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -50,7 +50,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; @@ -387,7 +387,7 @@ private long countUncheckpointedTxns() { img.getStorage().getMostRecentCheckpointTxId(); } - private class CheckpointerThread extends HadoopThread { + private class CheckpointerThread extends SubjectInheritingThread { private volatile boolean shouldRun = true; private volatile long preventCheckpointsUntil = 0; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index bc43045bbaeef..20b2641648cfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -51,7 +51,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -449,7 +449,7 @@ void start() { Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING)); if (thread.get() == null) { - final Thread t = new HadoopThread(null, new Runnable() { + final Thread t = new SubjectInheritingThread(null, new Runnable() { @Override public void run() { for(State s; !(s = checkErrorState()).isTerminated;) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index 2335202111c43..5462d190c105e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import 
org.mockito.Mockito; @@ -522,7 +522,7 @@ public boolean skipRollingRestartWait() { .getWrappedStream(); final AtomicBoolean running = new AtomicBoolean(true); final AtomicBoolean failed = new AtomicBoolean(false); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { public void work() { while (running.get()) { try { @@ -867,7 +867,7 @@ public Boolean get() { dataNodes[0].shutdown(); // Shutdown the second datanode when the pipeline is closing. - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { try { GenericTestUtils.waitFor(new Supplier() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 8027487f1a639..a1eb0c56af914 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -90,7 +90,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -730,7 +730,7 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in Counter counter = new Counter(0); for (int i = 0; i < threads; ++i ) { DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter); - readers[i] = new HadoopThread(reader); + readers[i] = new SubjectInheritingThread(reader); readers[i].start(); } @@ -1019,7 +1019,7 @@ public static void namenodeRestartTest(final Configuration conf, assertFalse(HdfsUtils.isHealthy(uri)); //namenode is down, continue writing file4 in 
a thread - final Thread file4thread = new HadoopThread(new Runnable() { + final Thread file4thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1038,7 +1038,7 @@ public void run() { file4thread.start(); //namenode is down, read the file in a thread - final Thread reader = new HadoopThread(new Runnable() { + final Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1058,7 +1058,7 @@ public void run() { //namenode is down, create another file in a thread final Path file3 = new Path(dir, "file"); - final Thread thread = new HadoopThread(new Runnable() { + final Thread thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1073,7 +1073,7 @@ public void run() { thread.start(); //restart namenode in a new thread - new HadoopThread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1126,7 +1126,7 @@ public void run() { assertFalse(HdfsUtils.isHealthy(uri)); //leave safe mode in a new thread - new HadoopThread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1307,7 +1307,7 @@ public void delayWhenRenewLeaseTimeout() { out1.write(new byte[256]); - Thread closeThread = new HadoopThread(new Runnable() { + Thread closeThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { //1. 
trigger get LeaseRenewer lock diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java index b4b40197c278d..f82bb16ee84c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java @@ -60,7 +60,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -340,7 +340,7 @@ public void testCongestionAckDelay() { AtomicBoolean isDelay = new AtomicBoolean(true); // ResponseProcessor needs the dataQueue for the next step. - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { for (int i = 0; i < 10; i++) { // In order to ensure that other threads run for a period of time to prevent affecting // the results. @@ -377,7 +377,7 @@ public void testCongestionAckDelay() { // The purpose of adding packets to the dataQueue is to make the DataStreamer run // normally and judge whether to enter the sleep state according to the congestion. 
- new HadoopThread(() -> { + new SubjectInheritingThread(() -> { for (int i = 0; i < 100; i++) { packet[i] = mock(DFSPacket.class); dataQueue.add(packet[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index df349550ac487..3188185a3e1c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -70,7 +70,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.slf4j.event.Level; @@ -949,7 +949,7 @@ public void testTailWithFresh() throws Exception { final ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); - final HadoopThread tailer = new HadoopThread() { + final SubjectInheritingThread tailer = new SubjectInheritingThread() { @Override public void work() { final String[] argv = new String[]{"-tail", "-f", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index 7e2c1e4aa9068..1b49267ab8942 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.test.GenericTestUtils; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -62,7 +62,7 @@ public class TestDatanodeDeath { // // an object that does a bunch of transactions // - static class Workload extends HadoopThread { + static class Workload extends SubjectInheritingThread { private final short replication; private final int numberOfFiles; private final int id; @@ -211,7 +211,7 @@ private static void checkData(byte[] actual, int from, byte[] expected, String m * a block do not get killed (otherwise the file will be corrupt and the * test will fail). */ - class Modify extends HadoopThread { + class Modify extends SubjectInheritingThread { volatile boolean running; final MiniDFSCluster cluster; final Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index dfe072b41efca..c7436f0f2fe31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -457,7 +457,7 @@ public void sync() { } private void startWaitForDeadNodeThread(DFSClient dfsClient, int size) { - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { DeadNodeDetector deadNodeDetector = dfsClient.getClientContext().getDeadNodeDetector(); while 
(deadNodeDetector.clearAndGetDetectedDeadNodes().size() != size) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index cc93bd2b3bf88..9715083c373b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -86,7 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -865,7 +865,7 @@ public void testDecommissionWithOpenfileReporting() closedFileSet, openFilesMap, maxDnOccurance); final AtomicBoolean stopRedundancyMonitor = new AtomicBoolean(false); - Thread monitorThread = new HadoopThread(new Runnable() { + Thread monitorThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (!stopRedundancyMonitor.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index 164f677e231ce..8a8361661f089 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; @@ -260,7 +260,7 @@ public void testDecommissionWithURBlockForSameBlockGroup() throws Exception { // Decommission node in a new thread. Verify that node is decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - HadoopThread decomTh = new HadoopThread() { + SubjectInheritingThread decomTh = new SubjectInheritingThread() { public void work() { try { decomStarted.countDown(); @@ -996,7 +996,7 @@ public void testDecommissionWithMissingBlock() throws Exception { // Handle decommission nodes in a new thread. // Verify that nodes are decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - new HadoopThread( + new SubjectInheritingThread( () -> { try { decomStarted.countDown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index d6e3881db8c62..f74ac5ee33288 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -42,7 +42,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -373,7 +373,7 @@ public void testSimpleAppend2() throws Exception { // // an object that does a bunch of appends to files // - class Workload extends HadoopThread { + class Workload extends SubjectInheritingThread { private final int id; private final MiniDFSCluster cluster; private final 
boolean appendToNewBlock; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index f1906bc021a4f..936a90d9e4da0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.invocation.InvocationOnMock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -553,7 +553,7 @@ public HdfsFileStatus answer(InvocationOnMock invocation){ DFSClientAdapter.setDFSClient(fs, spyClient); // Create two threads for doing appends to the same file. - HadoopThread worker1 = new HadoopThread() { + SubjectInheritingThread worker1 = new SubjectInheritingThread() { @Override public void work() { try { @@ -563,7 +563,7 @@ public void work() { } }; - HadoopThread worker2 = new HadoopThread() { + SubjectInheritingThread worker2 = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index b2a3bac54f830..cf3d4a0c4d886 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.test.GenericTestUtils; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -166,7 +166,7 @@ public void testRecoverFinalizedBlock() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { @@ -238,7 +238,7 @@ public void testCompleteOtherLeaseHoldersFile() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index 77b640a532b85..77126a99d15e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -38,7 +38,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -222,7 +222,7 @@ public void testImmediateReadOfNewFile() final AtomicReference errorMessage = new AtomicReference(); final FSDataOutputStream out = fileSystem.create(file); - final Thread writer = new HadoopThread(new Runnable() { + final Thread writer = 
new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -242,7 +242,7 @@ public void run() { } }); - Thread opener = new HadoopThread(new Runnable() { + Thread opener = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -347,7 +347,7 @@ private void runTestUnfinishedBlockCRCError( final AtomicBoolean writerStarted = new AtomicBoolean(false); final AtomicBoolean error = new AtomicBoolean(false); - final Thread writer = new HadoopThread(new Runnable() { + final Thread writer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -380,7 +380,7 @@ public void run() { } } }); - Thread tailer = new HadoopThread(new Runnable() { + Thread tailer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java index 35eef25724ed0..6d1d6312515b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -114,7 +114,7 @@ public void testClientTriggeredLeaseRecovery() throws Exception { } } - static class SlowWriter extends HadoopThread { + static class SlowWriter extends SubjectInheritingThread { final FileSystem fs; final Path filepath; boolean running = true; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java index 9bef9542ae666..023013c02dd1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java @@ -35,7 +35,7 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; /** @@ -72,7 +72,7 @@ private void initBuffer(int size) { toWrite = AppendTestUtil.randomBytes(seed, size); } - private class WriterThread extends HadoopThread { + private class WriterThread extends SubjectInheritingThread { private final FSDataOutputStream stm; private final AtomicReference thrown; private final int numWrites; @@ -163,7 +163,7 @@ public void testHflushWhileClosing() throws Throwable { final AtomicReference thrown = new AtomicReference(); try { for (int i = 0; i < 10; i++) { - HadoopThread flusher = new HadoopThread() { + SubjectInheritingThread flusher = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index a7d2472bba928..7536d7d4870fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import 
org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -190,7 +190,7 @@ public int pRead(DFSInputStream dis, byte[] target, int startOff, int len) /** * A worker to do one "unit" of read. */ - static class ReadWorker extends HadoopThread { + static class ReadWorker extends SubjectInheritingThread { static public final int N_ITERATIONS = 1024; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java index cfda31d91d1bb..9b1b6c50b7e4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.log4j.Level; import org.junit.jupiter.api.Assertions; @@ -160,7 +160,7 @@ public void testInterruptReader() throws Exception { final FSDataInputStream in = fs.open(file); AtomicBoolean readInterrupted = new AtomicBoolean(false); - final Thread reader = new HadoopThread(new Runnable() { + final Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java index f47b5bbb746e7..995652ef61c64 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -245,7 +245,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime * 1000L); } - static class SlowWriter extends HadoopThread { + static class SlowWriter extends SubjectInheritingThread { private final Path filepath; private final HdfsDataOutputStream out; private final long sleepms; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index bfc4bbcefcb88..966b585ec449b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -223,7 +223,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime 
* 1000L); } - static class SlowWriter extends HadoopThread { + static class SlowWriter extends SubjectInheritingThread { final Path filepath; final HdfsDataOutputStream out; final long sleepms; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index fb131fe780acf..a7392d2acd6ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -73,7 +73,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -236,7 +236,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new HadoopThread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } Thread.sleep(500); @@ -335,7 +335,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new HadoopThread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } gotFailureLatch.await(); @@ -641,7 +641,7 @@ public void run() { } } }; - Thread thread = new HadoopThread(readerRunnable); + Thread thread = new SubjectInheritingThread(readerRunnable); thread.start(); // While the thread is reading, send it interrupts. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java index a81db9bc6f62a..d60da90155319 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java @@ -34,7 +34,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.Test; import java.lang.management.ManagementFactory; @@ -105,7 +105,7 @@ private long addOneDataNode(Configuration conf) throws Exception { } private Thread newBalancerService(Configuration conf, String[] args) { - return new HadoopThread(new Runnable() { + return new SubjectInheritingThread(new Runnable() { @Override public void run() { Tool cli = new Balancer.Cli(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 8d6fc050a5dc6..3838e1ccf9a40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; @@ -1523,7 +1523,7 @@ public void testAsyncIBR() throws Exception { Thread[] writers = new Thread[numWriters]; for (int i=0; i < writers.length; i++) { final Path p = new Path("/writer"+i); - writers[i] = new HadoopThread(new Runnable() { + writers[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index 38a1494c486db..5e748d5e02161 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -70,7 +70,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -916,7 +916,7 @@ private Block findBlock(Path path, long size) throws IOException { return ret; } - private class BlockChecker extends HadoopThread { + private class BlockChecker extends SubjectInheritingThread { final Path filePath; public BlockChecker(final Path filePath) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index 9ef31ccc5863f..9b08088847842 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -87,7 +87,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -338,7 +338,7 @@ public void blockUtilSendFullBlockReport() { }); countBlockReportItems(FAKE_BLOCK, mockNN1, blocks); - addNewBlockThread = new HadoopThread(() -> { + addNewBlockThread = new SubjectInheritingThread(() -> { for (int i = 0; i < totalTestBlocks; i++) { SimulatedFSDataset fsDataset = (SimulatedFSDataset) mockFSDataset; SimulatedStorage simulatedStorage = fsDataset.getStorages().get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 2afc98e8c36c8..8f298140908dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -96,7 +96,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -918,7 +918,7 @@ private void testStopWorker(final TestStopWorkerRunnable tswr) final RecoveringBlock 
recoveringBlock = Iterators.get(recoveringBlocks.iterator(), 0); final ExtendedBlock block = recoveringBlock.getBlock(); - Thread slowWriterThread = new HadoopThread(new Runnable() { + Thread slowWriterThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -945,7 +945,7 @@ public void run() { progressParent.uninterruptiblyAcquire(60000); // Start a worker thread which will attempt to stop the writer. - Thread stopWriterThread = new HadoopThread(new Runnable() { + Thread stopWriterThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 7365e1076e70b..a51f3864dbb35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.TestName; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -249,7 +249,7 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() final DataNode dataNode = cluster.getDataNodes().get(0); final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); - Thread recoveryThread = new HadoopThread(() -> { + Thread recoveryThread = new SubjectInheritingThread(() -> { try { DatanodeInfo[] locations = block.getLocations(); final BlockRecoveryCommand.RecoveringBlock recoveringBlock = @@ -368,7 +368,7 @@ public void 
testEcRecoverBlocks() throws Throwable { // write 5MB File AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); final AtomicReference err = new AtomicReference<>(); - Thread t = new HadoopThread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { stm.close(); } catch (Throwable t1) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index b27faa1259d1d..40d7e5f86a5d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -51,7 +51,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -568,7 +568,7 @@ public void testAddVolumesConcurrently() // Thread to list all storage available at DataNode, // when the volumes are being added in parallel. 
- final HadoopThread listStorageThread = new HadoopThread(new Runnable() { + final SubjectInheritingThread listStorageThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (addVolumeCompletionLatch.getCount() != newVolumeCount) { @@ -592,7 +592,7 @@ public void run() { public Object answer(InvocationOnMock invocationOnMock) throws Throwable { final Random r = new Random(); Thread addVolThread = - new HadoopThread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -929,7 +929,7 @@ public void logDelaySendingAckToUpstream( final DataNode dataNode = dn; final CyclicBarrier reconfigBarrier = new CyclicBarrier(2); - Thread reconfigThread = new HadoopThread(() -> { + Thread reconfigThread = new SubjectInheritingThread(() -> { try { reconfigBarrier.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index a20bf94b2ac07..ef5820bdf9d34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -82,7 +82,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -442,7 +442,7 @@ public void delayWhenOfferServiceHoldLock() { BPServiceActor actor = service.getBPServiceActors().get(0); DatanodeRegistration bpRegistration = actor.getBpRegistration(); - Thread register = new 
HadoopThread(() -> { + Thread register = new SubjectInheritingThread(() -> { try { service.registrationSucceeded(actor, bpRegistration); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java index 15c7f71f0922f..02b444df8b850 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -86,7 +86,7 @@ public void testBaseFunc() { @Test @Timeout(value = 5) public void testAcquireWriteLockError() throws InterruptedException { - Thread t = new HadoopThread(() -> { + Thread t = new SubjectInheritingThread(() -> { manager.readLock(LockLevel.BLOCK_POOl, "test"); manager.writeLock(LockLevel.BLOCK_POOl, "test"); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java index 1e5b979789c8c..eea64bef1f859 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java @@ -34,7 +34,7 @@ import 
org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -96,7 +96,7 @@ public NullDataNode(Configuration conf, OutputStream out, int port) throws any(StorageType.class), any(String.class), any(ExtendedBlock.class), anyBoolean()); - new HadoopThread(new NullServer(port)).start(); + new SubjectInheritingThread(new NullServer(port)).start(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index 6b4f4116a06ab..3dcff8c54aa17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -382,7 +382,7 @@ public void testConcurrentAddBlockPool() throws InterruptedException, IOException { final String[] bpids = {"BP-TEST1-", "BP-TEST2-"}; final SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf); - class AddBlockPoolThread extends HadoopThread { + class AddBlockPoolThread extends SubjectInheritingThread { private int id; private IOException ioe; public AddBlockPoolThread(int id) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index df7e08fd8098b..30a90fac5cecc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -84,7 +84,7 @@ import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -649,7 +649,7 @@ public void testConcurrentWriteAndDeleteBlock() throws Exception { Random random = new Random(); // Random write block and delete half of them. 
for (int i = 0; i < threadCount; i++) { - HadoopThread thread = new HadoopThread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override public void work() { try { @@ -932,7 +932,7 @@ public void testRemoveVolumeBeingWritten() throws Exception { final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1); final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1); final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1); - class BlockReportThread extends HadoopThread { + class BlockReportThread extends SubjectInheritingThread { public void work() { // Lets wait for the volume remove process to start try { @@ -947,7 +947,7 @@ public void work() { } } - class ResponderThread extends HadoopThread { + class ResponderThread extends SubjectInheritingThread { public void work() { try (ReplicaHandler replica = dataset .createRbw(StorageType.DEFAULT, null, eb, false)) { @@ -974,7 +974,7 @@ public void work() { } } - class VolRemoveThread extends HadoopThread { + class VolRemoveThread extends SubjectInheritingThread { public void work() { Set volumesToRemove = new HashSet<>(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java index b4a06570f1c71..eedca0b5a7c42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java @@ -42,7 +42,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -387,7 +387,7 @@ public void testAddRplicaProcessorForAddingReplicaInMap() throws Exception { ExecutorService pool = Executors.newFixedThreadPool(10); List> futureList = new ArrayList<>(); for (int i = 0; i < 100; i++) { - HadoopThread thread = new HadoopThread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override public void work() { for (int j = 0; j < 10; j++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index c359edb649f60..e4110e436f960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -197,7 +197,7 @@ public void run() { Thread threads[] = new Thread[NUM_TASKS]; for (int i = 0; i < NUM_TASKS; i++) { - threads[i] = new HadoopThread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java index 72783a401c700..1c715c755c9f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -176,7 +176,7 @@ public void testFsDatasetImplDeepCopyReplica() { modifyThread.setShouldRun(false); } - private class ModifyThread extends HadoopThread { + private class ModifyThread extends SubjectInheritingThread { private boolean shouldRun = true; @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index ef3a9e325fb44..07f11f4287c4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.Assert; import 
org.junit.Test; import org.mockito.Mockito; @@ -720,7 +720,7 @@ public Object answer(InvocationOnMock invocation) { getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)); final String newDirs = oldDirs.get(0); LOG.info("Reconfigure newDirs:" + newDirs); - HadoopThread reconfigThread = new HadoopThread() { + SubjectInheritingThread reconfigThread = new SubjectInheritingThread() { public void work() { try { LOG.info("Waiting for work plan creation!"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 6511b4407cc12..c404ed3707b45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -85,7 +85,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; /** @@ -423,7 +423,7 @@ void printStats() { /** * One of the threads that perform stats operations. 
*/ - private class StatsDaemon extends HadoopThread { + private class StatsDaemon extends SubjectInheritingThread { private final int daemonId; private int opsPerThread; private String arg1; // argument passed to executeOp() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 7ccdd625306b6..4afaa049927b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -39,7 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -316,7 +316,7 @@ public void testAuditLoggerWithCallContext() throws IOException { .build(); CallerContext.setCurrent(context); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - Thread child = new HadoopThread(new Runnable() + Thread child = new SubjectInheritingThread(new Runnable() { @Override public void run() { @@ -343,7 +343,7 @@ public void run() { .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING)) .build(); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - child = new HadoopThread(new Runnable() + child = new SubjectInheritingThread(new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index c75b1e25840a3..62d1feb67c20d 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -88,7 +88,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -2612,7 +2612,7 @@ private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary /** * A utility class to perform a checkpoint in a different thread. */ - private static class DoCheckpointThread extends HadoopThread { + private static class DoCheckpointThread extends SubjectInheritingThread { private final SecondaryNameNode snn; private volatile Throwable thrown = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index 68ce8abe4e872..102420ed26563 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -59,7 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -109,7 +109,7 @@ private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception { "/"), "s1"); } 
- HadoopThread deleteThread = new DeleteThread(fs, filePath); + SubjectInheritingThread deleteThread = new DeleteThread(fs, filePath); deleteThread.start(); try { @@ -149,7 +149,7 @@ public DatanodeStorageInfo[] chooseTarget(String srcPath, } } - private class DeleteThread extends HadoopThread { + private class DeleteThread extends SubjectInheritingThread { private FileSystem fs; private Path path; @@ -178,7 +178,7 @@ public void work() { } } - private class RenameThread extends HadoopThread { + private class RenameThread extends SubjectInheritingThread { private FileSystem fs; private Path from; private Path to; @@ -457,14 +457,14 @@ public void testOpenRenameRace() throws Exception { // 6.release writeLock, it's fair lock so open thread gets read lock. // 7.open thread unlocks, rename gets write lock and does rename. // 8.rename thread unlocks, open thread gets write lock and update time. - Thread open = new HadoopThread(() -> { + Thread open = new SubjectInheritingThread(() -> { try { openSem.release(); fsn.getBlockLocations("foo", src, 0, 5); } catch (IOException e) { } }); - Thread rename = new HadoopThread(() -> { + Thread rename = new SubjectInheritingThread(() -> { try { openSem.acquire(); renameSem.release(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index b95f994193c9f..d7268e95fc7ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -90,7 +90,7 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.log4j.AppenderSkeleton; import org.apache.log4j.LogManager; import org.apache.log4j.spi.LoggingEvent; @@ -502,7 +502,7 @@ private void testEditLog(int initialSize) throws IOException { for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS); - threadId[i] = new HadoopThread(trans, "TransactionThread-" + i); + threadId[i] = new SubjectInheritingThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 019118a039d28..beda93568e179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -67,7 +67,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.ArgumentMatcher; import org.slf4j.event.Level; import org.junit.jupiter.api.Test; @@ -206,7 +206,7 @@ private void startTransactionWorkers(MiniDFSCluster cluster, // Create threads and make them run transactions concurrently. 
for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(cluster, caughtErr); - new HadoopThread(trans, "TransactionThread-" + i).start(); + new SubjectInheritingThread(trans, "TransactionThread-" + i).start(); workers.add(trans); } } @@ -426,7 +426,7 @@ public void testSaveImageWhileSyncInProgress() throws Exception { new AtomicReference(); final CountDownLatch waitToEnterFlush = new CountDownLatch(1); - final HadoopThread doAnEditThread = new HadoopThread() { + final SubjectInheritingThread doAnEditThread = new SubjectInheritingThread() { @Override public void work() { try { @@ -519,7 +519,7 @@ public void testSaveRightBeforeSync() throws Exception { new AtomicReference(); final CountDownLatch sleepingBeforeSync = new CountDownLatch(1); - final HadoopThread doAnEditThread = new HadoopThread() { + final SubjectInheritingThread doAnEditThread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index ee4a6ddda8b00..261c297fcf646 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -29,7 +29,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -269,7 +269,7 @@ public void testFSReadLockLongHoldingReport() throws Exception { // Track but do not Report if it's held for a long time when re-entering // read lock but time since last report does 
not exceed the suppress // warning interval - HadoopThread tLong = new HadoopThread() { + SubjectInheritingThread tLong = new SubjectInheritingThread() { @Override public void work() { fsnLock.readLock(); @@ -319,7 +319,7 @@ public void work() { logs.clearOutput(); final CountDownLatch barrier = new CountDownLatch(1); final CountDownLatch barrier2 = new CountDownLatch(1); - HadoopThread t1 = new HadoopThread() { + SubjectInheritingThread t1 = new SubjectInheritingThread() { @Override public void work() { try { @@ -333,7 +333,7 @@ public void work() { } } }; - HadoopThread t2 = new HadoopThread() { + SubjectInheritingThread t2 = new SubjectInheritingThread() { @Override public void work () { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java index 4f89d75b55ad2..68f9ca3b525a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java @@ -41,7 +41,7 @@ import org.apache.hadoop.metrics2.impl.ConfigBuilder; import org.apache.hadoop.metrics2.impl.TestMetricsConfig; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -56,7 +56,7 @@ public class TestFSNamesystemMBean { * JMX properties. If it can access all the properties, the test is * considered successful. 
*/ - private static class MBeanClient extends HadoopThread { + private static class MBeanClient extends SubjectInheritingThread { private boolean succeeded = false; @Override public void work() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index af87005630d78..4b507035fa662 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -68,7 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -256,7 +256,7 @@ public void delay() { DataNodeFaultInjector.set(injector); // Truncate by using different client name. 
- Thread t = new HadoopThread(() -> { + Thread t = new SubjectInheritingThread(() -> { String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; boolean originCacheDisable = conf.getBoolean(hdfsCacheDisableKey, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java index 49817e9a14999..3a7a882b8bd40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -166,7 +166,7 @@ protected void execute() throws Throwable { * implementation class, the thread is notified: other threads can wait * for it to terminate */ - private abstract class TestThread extends HadoopThread { + private abstract class TestThread extends SubjectInheritingThread { volatile Throwable thrown; protected volatile boolean live = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 93c85ad50e2c9..5f5bc122ba9a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -56,7 +56,7 @@ import 
org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.ChunkedArrayList; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -223,7 +223,7 @@ public void testListOpenFilesInHA() throws Exception { final AtomicBoolean failoverCompleted = new AtomicBoolean(false); final AtomicBoolean listOpenFilesError = new AtomicBoolean(false); final int listingIntervalMsec = 250; - Thread clientThread = new HadoopThread(new Runnable() { + Thread clientThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while(!failoverCompleted.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index dfbf0b46686c3..ca11df635c503 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -219,7 +219,7 @@ public void testMetaSaveOverwrite() throws Exception { } } - class MetaSaveThread extends HadoopThread { + class MetaSaveThread extends SubjectInheritingThread { NamenodeProtocols nnRpc; String filename; public MetaSaveThread(NamenodeProtocols nnRpc, String filename) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java index 8dd7a6e3d359f..6e10497352918 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java @@ -27,7 +27,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.StopWatch; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.test.Whitebox; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -178,7 +178,7 @@ public void testThrottleAccumulatingTasks() throws Exception { zst.addTask(mock); } - HadoopThread removeTaskThread = new HadoopThread() { + SubjectInheritingThread removeTaskThread = new SubjectInheritingThread() { public void work() { try { Thread.sleep(3000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 4eef88fcd3798..6b6dcfe9da0ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -38,7 +38,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; @@ -137,7 +137,7 @@ public void testEditLog() throws IOException { Thread threadId[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS); - threadId[i] = new HadoopThread(trans, "TransactionThread-" + i); + threadId[i] = new SubjectInheritingThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 0313a2e488c21..b43f8c9f89dfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -62,7 +62,7 @@ import org.junit.Test; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; public class TestBootstrapStandby { private static final Logger LOG = @@ -402,7 +402,7 @@ public void testRateThrottling() throws Exception { final int timeOut = (int)(imageFile.length() / minXferRatePerMS) + 1; // A very low DFS_IMAGE_TRANSFER_RATE_KEY value won't affect bootstrapping final AtomicBoolean bootStrapped = new AtomicBoolean(false); - new HadoopThread( + new SubjectInheritingThread( new Runnable() { @Override public void run() { @@ -432,7 +432,7 @@ public Boolean get() { // A very low DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY value should // cause timeout bootStrapped.set(false); - new HadoopThread( + new SubjectInheritingThread( new Runnable() { @Override public void run() { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index 57be6dddce6ea..b38117f781a84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -55,7 +55,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -151,7 +151,7 @@ public void testMsyncSimple() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new HadoopThread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // this read will block until roll and tail edits happen. dfs.getFileStatus(testPath); @@ -201,7 +201,7 @@ private void testMsync(boolean autoMsync, long autoMsyncPeriodMs) dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new HadoopThread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // After msync, client should have the latest state ID from active. // Therefore, the subsequent getFileStatus call should succeed. 
@@ -290,7 +290,7 @@ public void testCallFromNewClient() throws Exception { (DistributedFileSystem) FileSystem.get(conf2); dfs2.getClient().getHAServiceState(); - Thread reader = new HadoopThread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { dfs2.getFileStatus(testPath); readStatus.set(1); @@ -331,7 +331,7 @@ public void testUncoordinatedCall() throws Exception { AtomicInteger readStatus = new AtomicInteger(0); // create a separate thread to make a blocking read. - Thread reader = new HadoopThread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // this read call will block until server state catches up. But due to // configuration, this will take a very long time. @@ -436,7 +436,7 @@ public void testRpcQueueTimeNumOpsMetrics() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new HadoopThread(new Runnable() { + Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 43307daff3e4e..926b020e7e0d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -260,7 +260,7 @@ public void testDelegationTokenDuringNNFailover() throws 
Exception { HAServiceState.STANDBY.toString(), e); } - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index e7e66490c9d73..67518f74e0e97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -68,7 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -140,7 +140,7 @@ public void testClientRetrySafeMode() throws Exception { .getBlockManager()); assertTrue(nn0.getNamesystem().isInStartupSafeMode()); LOG.info("enter safemode"); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 38592f30eb0f1..3386e432238b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -91,7 +91,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.RetryCache.CacheEntry; import org.apache.hadoop.util.LightWeightCache; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -1310,7 +1310,7 @@ public void testClientRetryWithFailover(final AtMostOnceOp op) // set DummyRetryInvocationHandler#block to true DummyRetryInvocationHandler.block.set(true); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index c973e9f858344..bbf1c80b58243 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -53,7 +53,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Before; @@ -551,7 +551,7 @@ public void testReadsAllowedDuringCheckpoint() throws Exception { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); // Perform an RPC that needs to take the write lock. 
- HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index d361ad5215653..50fa813b7d61d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -701,7 +701,7 @@ public void testOpenFileWritingAcrossSnapDeletion() throws Exception { final AtomicBoolean writerError = new AtomicBoolean(false); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch deleteLatch = new CountDownLatch(1); - Thread t = new HadoopThread(new Runnable() { + Thread t = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java index a14a228e8f98c..0b725b93d564c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java @@ -62,7 +62,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -564,9 +564,9 @@ public static void main(String[] args) throws Exception { long start = Time.now(); final int iteration = 20; - HadoopThread[] threads = new HadoopThread[threadCount]; + SubjectInheritingThread[] threads = new SubjectInheritingThread[threadCount]; for (int i = 0; i < threadCount; i++) { - threads[i] = new HadoopThread() { + threads[i] = new SubjectInheritingThread() { @Override public void work() { for (int i = 0; i < iteration; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java index c47adc8afbf9b..c3296ee7b4a07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.util; import org.apache.hadoop.hdfs.server.namenode.AclFeature; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -86,7 +86,7 @@ public void testRefCountMapConcurrently() throws Exception { assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2)); } - class PutThread extends HadoopThread { + class PutThread extends 
SubjectInheritingThread { private ReferenceCountMap referenceCountMap; PutThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; @@ -100,7 +100,7 @@ public void work() { } }; - class RemoveThread extends HadoopThread { + class RemoveThread extends SubjectInheritingThread { private ReferenceCountMap referenceCountMap; RemoveThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 7a8dd4d5a0529..48751695e48c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -57,7 +57,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.Whitebox; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -297,7 +297,7 @@ public void testRetryWhileNNStartup() throws Exception { final NamenodeProtocols rpcServer = namenode.getRpcServer(); Whitebox.setInternalState(namenode, "rpcServer", null); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { boolean result = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 769924fd39d54..d11dc5ec2be51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -51,7 +51,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.opentest4j.TestAbortedException; @@ -326,7 +326,7 @@ public void testTwoStepWriteReadTimeout(TimeoutSource src) throws Exception { private void startSingleTemporaryRedirectResponseThread( final boolean consumeConnectionBacklog) { fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; - serverThread = new HadoopThread() { + serverThread = new SubjectInheritingThread() { @Override public void work() { Socket clientSocket = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index 47d651c9566f6..1548bcc3c6bd7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -64,7 +64,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -152,7 +152,7 @@ public void serviceStart() throws Exception { HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder(). setDaemon(true).setNameFormat("uber-SubtaskRunner").build()); // create and start an event handling thread - eventHandler = new HadoopThread(new EventHandler(), "uber-EventHandler"); + eventHandler = new SubjectInheritingThread(new EventHandler(), "uber-EventHandler"); // if the job classloader is specified, set it onto the event handler as the // thread context classloader so that it can be used by the event handler // as well as the subtask runner threads diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 72b395f11906b..16f30c901773a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -65,7 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; @@ -352,7 +352,7 @@ protected void serviceStart() throws Exception { } else if (timelineV2Client != null) { timelineV2Client.start(); } - 
eventHandlingThread = new HadoopThread(new Runnable() { + eventHandlingThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { JobHistoryEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 1663fc7e4316d..703f0b1f58778 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -134,7 +134,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -740,7 +740,7 @@ private class JobFinishEventHandler implements EventHandler { public void handle(JobFinishEvent event) { // Create a new thread to shutdown the AM. We should not do it in-line // to avoid blocking the dispatcher itself. 
- new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index c9c5e57e08cb1..194f844bd7118 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -33,7 +33,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.Clock; import org.slf4j.Logger; @@ -126,7 +126,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - lostTaskCheckerThread = new HadoopThread(new PingChecker()); + lostTaskCheckerThread = new SubjectInheritingThread(new PingChecker()); lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker"); lostTaskCheckerThread.start(); super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index 82f28bf019a55..5e444eb1386f0 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -47,7 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -127,7 +127,7 @@ protected void serviceStart() throws Exception { ThreadFactory backingTf = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread thread = new HadoopThread(r); + Thread thread = new SubjectInheritingThread(r); thread.setContextClassLoader(jobClassLoader); return thread; } @@ -137,7 +137,7 @@ public Thread newThread(Runnable r) { ThreadFactory tf = tfBuilder.build(); launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new HadoopThread(new Runnable() { + eventHandlingThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { CommitterEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index 0724fed2561be..ee7fbde99393c 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -43,7 +43,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -286,7 +286,7 @@ protected void serviceStart() throws Exception { Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new HadoopThread() { + eventHandlingThread = new SubjectInheritingThread() { @Override public void work() { ContainerLauncherEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index d94793150daea..ec5561f0ecf71 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -40,7 +40,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; @@ -301,7 +301,7 @@ public void run() { } protected void startAllocatorThread() { - allocatorThread = new HadoopThread(new AllocatorRunnable()); + allocatorThread = new SubjectInheritingThread(new AllocatorRunnable()); allocatorThread.setName("RMCommunicator Allocator"); allocatorThread.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index ae1ad1a7eca8b..a0f73b0b4ff68 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -62,7 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -112,7 +112,7 @@ public class RMContainerAllocator extends RMContainerRequestor public static final String RAMPDOWN_DIAGNOSTIC = "Reducer 
preempted " + "to make room for pending map attempts"; - private HadoopThread eventHandlingThread; + private SubjectInheritingThread eventHandlingThread; private final AtomicBoolean stopped; static { @@ -247,7 +247,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - this.eventHandlingThread = new HadoopThread() { + this.eventHandlingThread = new SubjectInheritingThread() { @SuppressWarnings("unchecked") @Override public void work() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 2a63dd876e595..7eb786c5e50e8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -45,7 +45,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -220,7 +220,7 @@ public void run() { } } }; - speculationBackgroundThread = new HadoopThread + speculationBackgroundThread = new SubjectInheritingThread (speculationBackgroundCore, "DefaultSpeculator background processing"); speculationBackgroundThread.start(); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 717783964d805..3288390e62d2f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -39,7 +39,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -138,7 +138,7 @@ public void handle(ContainerAllocatorEvent event) { } @Override protected void serviceStart() throws Exception { - thread = new HadoopThread(new Runnable() { + thread = new SubjectInheritingThread(new Runnable() { @Override @SuppressWarnings("unchecked") public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index 75b44642fa087..e0d2a323ee631 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -48,7 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -238,7 +238,7 @@ protected void unregister() { @Override protected void startAllocatorThread() { - allocatorThread = new HadoopThread(); + allocatorThread = new SubjectInheritingThread(); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index fe4ca80b8c722..c0e149b134840 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -72,7 +72,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,7 +115,7 @@ public ProtocolSignature getProtocolSignature(String protocol, this, protocol, clientVersion, clientMethodsHash); } - private class Job 
extends HadoopThread implements TaskUmbilicalProtocol { + private class Job extends SubjectInheritingThread implements TaskUmbilicalProtocol { // The job directory on the system: JobClient places job configurations here. // This is analogous to JobTracker's system directory. private Path systemJobDir; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java index abbce2eda75bd..4c217a4c7a032 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; class CleanupQueue { @@ -101,7 +101,7 @@ protected boolean isQueueEmpty() { return (cleanupThread.queue.size() == 0); } - private static class PathCleanupThread extends HadoopThread { + private static class PathCleanupThread extends SubjectInheritingThread { // cleanup queue which deletes files/directories of the paths queued up. 
private LinkedBlockingQueue queue = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 5ebe61df6e967..f41c94bf4b58b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -74,7 +74,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1550,7 +1550,7 @@ public void flush() throws IOException, ClassNotFoundException, public void close() { } - protected class SpillThread extends HadoopThread { + protected class SpillThread extends SubjectInheritingThread { @Override public void work() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 7820f49184d88..44a4b41ef85a7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -72,7 +72,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -953,7 +953,7 @@ void resetDoneFlag() { } public void startCommunicationThread() { if (pingThread == null) { - pingThread = new HadoopThread(this, "communication thread"); + pingThread = new SubjectInheritingThread(this, "communication thread"); pingThread.setDaemon(true); pingThread.start(); } @@ -964,7 +964,7 @@ public void startDiskLimitCheckerThreadIfNeeded() { MRJobConfig.JOB_SINGLE_DISK_LIMIT_BYTES, MRJobConfig.DEFAULT_JOB_SINGLE_DISK_LIMIT_BYTES) >= 0) { try { - diskLimitCheckThread = new HadoopThread(new DiskLimitCheck(conf), + diskLimitCheckThread = new SubjectInheritingThread(new DiskLimitCheck(conf), "disk limit check thread"); diskLimitCheckThread.setDaemon(true); diskLimitCheckThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java index a07dc0f1c394d..68ae9e97f19d1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java @@ -57,7 +57,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -281,7 +281,7 @@ public static String createDigest(byte[] password, String data) } @VisibleForTesting - public static class 
PingSocketCleaner extends HadoopThread { + public static class PingSocketCleaner extends SubjectInheritingThread { private final ServerSocket serverSocket; private final int soTimeout; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java index 6b9530ba41261..fdfe07768742a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java @@ -42,7 +42,7 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -96,7 +96,7 @@ private enum MessageType { START(0), private static class UplinkReaderThread - extends HadoopThread { + extends SubjectInheritingThread { private DataInputStream inStream; private UpwardProtocol handler; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java index 3e51bac25b11a..61521765d6173 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java @@ -40,7 +40,7 @@ import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * The Chain class provides all the common functionality for the @@ -297,7 +297,7 @@ private synchronized boolean setIfUnsetThrowable(Throwable th) { return false; } - private class MapRunner extends HadoopThread { + private class MapRunner extends SubjectInheritingThread { private Mapper mapper; private Mapper.Context chainContext; private RecordReader rr; @@ -330,7 +330,7 @@ public void work() { } } - private class ReduceRunner extends HadoopThread { + private class ReduceRunner extends SubjectInheritingThread { private Reducer reducer; private Reducer.Context chainContext; private RecordWriter rw; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java index c5b66dbd333e1..cb48a0e4a3435 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java @@ -19,7 +19,7 @@ package org.apache.hadoop.mapreduce.lib.map; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -248,7 +248,7 @@ public float getProgress() { } } - private class MapRunner extends HadoopThread { + private class MapRunner extends SubjectInheritingThread { private Mapper mapper; private Context subcontext; private Throwable throwable; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java index 6fac8ff401714..1857406744607 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java @@ -23,11 +23,11 @@ import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class EventFetcher extends HadoopThread { +class EventFetcher extends SubjectInheritingThread { private static final long SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; private static final int RETRY_PERIOD = 5000; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 03a4569d40672..da598e807e2fb 
100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -48,7 +48,7 @@ import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,7 +56,7 @@ import org.apache.hadoop.classification.VisibleForTesting; @VisibleForTesting -public class Fetcher extends HadoopThread { +public class Fetcher extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(Fetcher.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java index 1022b574f27df..9bf3edc1a7470 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java @@ -26,11 +26,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -abstract class MergeThread extends HadoopThread { 
+abstract class MergeThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(MergeThread.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java index be379cba18116..5e0dc0b3ba8c1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java @@ -49,7 +49,7 @@ import org.apache.hadoop.mapreduce.task.reduce.MapHost.State; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -555,7 +555,7 @@ public int compareTo(Delayed o) { /** * A thread that takes hosts off of the penalty list when the timer expires. 
*/ - private class Referee extends HadoopThread { + private class Referee extends SubjectInheritingThread { public Referee() { setName("ShufflePenaltyReferee"); setDaemon(true); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java index 7703fca7e61b5..473dff3b9430f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -327,7 +327,7 @@ public static boolean isProcessGroupAlive(String pgrpId) { /** * Helper thread class that kills process-tree with SIGKILL in background */ - static class SigKillThread extends HadoopThread { + static class SigKillThread extends SubjectInheritingThread { private String pid = null; private boolean isProcessGroup = false; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java index d2f1a5fc69a5a..0b79d56de9d37 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.mapreduce.MRJobConfig; import org.junit.jupiter.api.BeforeEach; @@ -222,7 +222,7 @@ public void testRemoveMap() throws Exception { // run multiple times for (int i = 0; i < 20; ++i) { - Thread getInfoThread = new HadoopThread() { + Thread getInfoThread = new SubjectInheritingThread() { @Override public void work() { try { @@ -232,7 +232,7 @@ public void work() { } } }; - Thread removeMapThread = new HadoopThread() { + Thread removeMapThread = new SubjectInheritingThread() { @Override public void work() { cache.removeMap("bigIndex"); @@ -267,7 +267,7 @@ public void testCreateRace() throws Exception { // run multiple instances Thread[] getInfoThreads = new Thread[50]; for (int i = 0; i < 50; i++) { - getInfoThreads[i] = new HadoopThread() { + getInfoThreads[i] = new SubjectInheritingThread() { @Override public void work() { try { @@ -286,7 +286,7 @@ public void work() { final Thread mainTestThread = Thread.currentThread(); - Thread timeoutThread = new HadoopThread() { + Thread timeoutThread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java index 
1b2aabaf72b74..0de980215c658 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -78,7 +78,7 @@ public boolean accept(Path path) { } }, true); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java index 46aef150978e3..f991756241941 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java @@ -34,7 +34,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID; import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -253,7 +253,7 @@ public void 
uncaughtException(Thread th, Throwable ex) { task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); reporter.startDiskLimitCheckerThreadIfNeeded(); - Thread t = new HadoopThread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); t.start(); @@ -274,7 +274,7 @@ public void testTaskProgress() throws Exception { Task task = new DummyTask(); task.setConf(job); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new HadoopThread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.start(); Thread.sleep(2100); task.setTaskDone(); @@ -329,7 +329,7 @@ public void uncaughtException(Thread th, Throwable ex) { Task task = new DummyTask(); task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new HadoopThread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java index 510a3469c9a63..ae6f488a6a6e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java @@ -41,7 +41,7 @@ import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -185,7 +185,7 @@ public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() dfsCluster.getFileSystem().setSafeMode( SafeModeAction.ENTER); assertTrue(dfsCluster.getFileSystem().isInSafeMode()); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { @@ -210,7 +210,7 @@ public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() assertTrue(dfsCluster.getFileSystem().isInSafeMode()); final ControlledClock clock = new ControlledClock(); clock.setTime(1); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java index 68d4ba48cd950..08559399dbd43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java @@ -38,7 +38,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.service.Service; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.EventHandler; import org.junit.jupiter.api.Test; @@ -255,7 +255,7 @@ protected EventHandler 
createJobHistoryHandler( @Override protected void serviceStart() { // Don't start any event draining thread. - super.eventHandlingThread = new HadoopThread(); + super.eventHandlingThread = new SubjectInheritingThread(); super.eventHandlingThread.start(); } }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java index 127e33b8cd68e..b18e0c041da86 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java @@ -26,7 +26,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.junit.jupiter.api.AfterAll; @@ -107,7 +107,7 @@ public void testTwoThreadsQueryingDifferentJobOfSameUser() * files in one child thread. */ createJhistFile(job1); - webRequest1 = new HadoopThread( + webRequest1 = new SubjectInheritingThread( new Runnable() { @Override public void run() { @@ -137,7 +137,7 @@ public void run() { * will also see the job history files for job1. 
*/ createJhistFile(job2); - webRequest2 = new HadoopThread( + webRequest2 = new SubjectInheritingThread( new Runnable() { @Override public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java index 91be7e6b9dbd4..3c36f13dcc3e6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java @@ -22,7 +22,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Fails the Mapper. First attempt throws exception. Rest do System.exit. @@ -34,7 +34,7 @@ public void map(Text key, Text value, // Just create a non-daemon thread which hangs forever. MR AM should not be // hung by this. 
- new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { synchronized (this) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java index f1f28487e1aef..c9c1cc482564a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java @@ -44,7 +44,7 @@ import org.apache.hadoop.mapred.*; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -179,7 +179,7 @@ enum StatSeries { public String toString() {return statName;} } - private static class FileCreateDaemon extends HadoopThread { + private static class FileCreateDaemon extends SubjectInheritingThread { private static final int NUM_CREATE_THREADS = 10; private static volatile int numFinishedThreads; private static volatile int numRunningThreads; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java index 83d84568598f9..de6668ffdf392 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java @@ -48,7 +48,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -309,7 +309,7 @@ public void configure(JobConf job) { getArgsFromConfiguration(jobConf); } - private class ProgressThread extends HadoopThread { + private class ProgressThread extends SubjectInheritingThread { boolean keepGoing; // while this is true, thread runs. private Reporter reporter; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java index f956dfe89a781..0952f089eda43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java @@ -39,7 +39,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -201,7 +201,7 @@ private void checkJobExitStatus(int status, String jobName) { private void runTest(final JobClient jc, final Configuration conf, final String jobClass, final String[] args, KillTaskThread killTaskThread, KillTrackerThread 
killTrackerThread) throws Exception { - HadoopThread t = new HadoopThread("Job Test") { + SubjectInheritingThread t = new SubjectInheritingThread("Job Test") { public void work() { try { Class jobClassObj = conf.getClassByName(jobClass); @@ -250,7 +250,7 @@ public void work() { t.join(); } - private class KillTrackerThread extends HadoopThread { + private class KillTrackerThread extends SubjectInheritingThread { private volatile boolean killed = false; private JobClient jc; private RunningJob rJob; @@ -393,7 +393,7 @@ private String convertTrackerNameToHostName(String trackerName) { } - private class KillTaskThread extends HadoopThread { + private class KillTaskThread extends SubjectInheritingThread { private volatile boolean killed = false; private RunningJob rJob; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java index 43c692c7dd6ea..35b3d14552630 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java @@ -21,7 +21,7 @@ import org.apache.hadoop.io.*; import org.apache.hadoop.mapred.UtilsForTests.RandomInputFormat; import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import java.io.*; @@ -52,7 +52,7 @@ public void map(Text key, Text val, final OutputCollector out, Reporter reporter) throws IOException { // Class for calling collect in separate threads - class CollectFeeder extends HadoopThread { + class CollectFeeder extends 
SubjectInheritingThread { int id; // id for the thread public CollectFeeder(int id) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java index 2144d275aae6a..0d55f3f986e80 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java @@ -30,7 +30,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -111,7 +111,7 @@ public static void doJobControlTest() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new HadoopThread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); while (!theControl.allFinished()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java index efa4c6d325670..f64a5270891f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,7 +116,7 @@ public void testLocalJobControlDataCopy() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new HadoopThread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); while (!theControl.allFinished()) { LOG.debug("Jobs in waiting state: " + theControl.getWaitingJobs().size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java index da4222e08269a..1bcdd26a1cbc8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java @@ -30,7 +30,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -318,7 +318,7 @@ public void 
testMultiMaps() throws Exception { FileOutputFormat.setOutputPath(job, outputPath); final Thread toInterrupt = Thread.currentThread(); - HadoopThread interrupter = new HadoopThread() { + SubjectInheritingThread interrupter = new SubjectInheritingThread() { public void work() { try { Thread.sleep(120*1000); // 2m diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java index f9ef635183521..431eac646feaf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java @@ -28,7 +28,7 @@ import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -113,7 +113,7 @@ private JobControl createDependencies(Configuration conf, Job job1) theControl.addJob(cjob2); theControl.addJob(cjob3); theControl.addJob(cjob4); - Thread theController = new HadoopThread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); return theControl; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java index 4b48e50a876ed..efa402a9efff5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java @@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; /** @@ -149,7 +149,7 @@ private ControlledJob createFailedControlledJob(JobControl jobControl, } private void runJobControl(JobControl jobControl) { - Thread controller = new HadoopThread(jobControl); + Thread controller = new SubjectInheritingThread(jobControl); controller.start(); waitTillAllFinished(jobControl); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index a82d07fc844e3..77f3322977ece 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -45,7 +45,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.MiniYARNCluster; @@ -267,7 +267,7 @@ public synchronized void serviceStart() throws Exception { } historyServer = new JobHistoryServer(); historyServer.init(getConfig()); - new HadoopThread() { + new SubjectInheritingThread() { public void work() { historyServer.start(); }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java index 33a0088454fb5..5e0d1667c11b0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java @@ -22,7 +22,7 @@ import org.apache.hadoop.mapred.Task.TaskReporter; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,7 +86,7 @@ public synchronized void start() { // init counters used by native side, // so they will have correct display name initUsedCounters(); - checker = new HadoopThread(this); + checker = new SubjectInheritingThread(this); checker.setDaemon(true); checker.start(); } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java index 8d8ee453aa58e..5b558b391fa04 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java @@ -42,7 +42,7 @@ import org.apache.hadoop.util.IndexedSortable; import org.apache.hadoop.util.QuickSort; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.FutureIO; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY; @@ -146,7 +146,7 @@ public static void writePartitionFile(final JobContext job, for(int i=0; i < samples; ++i) { final int idx = i; samplerReader[i] = - new HadoopThread (threadGroup, "Sampler Reader " + idx) { + new SubjectInheritingThread (threadGroup, "Sampler Reader " + idx) { { setDaemon(true); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java index 18bb0944792ab..2665013a5b455 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java @@ -38,7 +38,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticsContext; import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextImpl; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.CloseableTaskPoolSubmitter; import org.apache.hadoop.util.functional.TaskPool; @@ -458,7 +458,7 @@ public void testListingThroughTaskPool() throws Throwable { * If constructed with an IOStatisticsContext then * that context is switched to before performing the IO. */ - private class TestWorkerThread extends HadoopThread implements Runnable { + private class TestWorkerThread extends SubjectInheritingThread implements Runnable { private final Path workerThreadPath; private final IOStatisticsContext ioStatisticsContext; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java index 7a147df00e3e3..2b3cf8c829599 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java @@ -37,7 +37,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.test.tags.ScaleTest; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -135,7 +135,7 @@ concurrentRenames, new ThreadFactory() { private AtomicInteger count = new AtomicInteger(0); public Thread newThread(Runnable r) { - return new HadoopThread(r, + return new SubjectInheritingThread(r, "testParallelRename" + count.getAndIncrement()); } }); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java 
index b3c9c800470fa..98d28d1ea421a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java @@ -27,7 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -257,7 +257,7 @@ public AzureFileSystemThreadFactory(String prefix) { @Override public Thread newThread(Runnable r) { - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); // Use current thread name as part in naming thread such that use of // same file system object will have unique names. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 553ee4a969eb9..c19bbe48fc3f6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -44,7 +44,7 @@ import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; @@ -822,7 +822,7 @@ class UploaderThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); t.setName(String.format("%s-%d", THREAD_ID_PREFIX, 
threadSequenceNumber.getAndIncrement())); return t; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 19c67006c3cbf..74e94ef9da55c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azure; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.classification.VisibleForTesting; import com.microsoft.azure.storage.AccessCondition; @@ -105,7 +105,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper, boolean throwIfPresent) } } } - renewer = new HadoopThread(new Renewer()); + renewer = new SubjectInheritingThread(new Renewer()); // A Renewer running should not keep JVM from exiting, so make it a daemon. 
renewer.setDaemon(true); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java index 721ce68e837ff..d0a1bd0e7fb63 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java @@ -22,7 +22,7 @@ import java.util.Date; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Internal implementation class to help calculate the current bytes @@ -68,7 +68,7 @@ public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation, this.windowSizeMs = windowSizeMs; this.instrumentation = instrumentation; if (!manualUpdateTrigger) { - uploadBandwidthUpdater = new HadoopThread(new UploadBandwidthUpdater(), THREAD_NAME); + uploadBandwidthUpdater = new SubjectInheritingThread(new UploadBandwidthUpdater(), THREAD_NAME); uploadBandwidthUpdater.setDaemon(true); uploadBandwidthUpdater.start(); } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java index 56f2a2ba9c6ef..7a0e42c46f257 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java @@ -41,7 +41,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_LIST_MAX_RESULTS; @@ -152,7 +152,7 @@ public boolean listRecursiveAndTakeAction() Thread producerThread = null; try { ListBlobQueue listBlobQueue = createListBlobQueue(configuration); - producerThread = new HadoopThread(() -> { + producerThread = new SubjectInheritingThread(() -> { try { produceConsumableList(listBlobQueue); } catch (AzureBlobFileSystemException e) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java index 190d54dce9e5f..84f07e540e144 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java @@ -27,7 +27,7 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.classification.VisibleForTesting; /** @@ -93,7 +93,7 @@ void init() { getFreeList().add(i); } for (int i = 0; i < NUM_THREADS; i++) { - Thread t = new HadoopThread(new ReadBufferWorker(i, this)); + Thread t = new SubjectInheritingThread(new ReadBufferWorker(i, this)); t.setDaemon(true); threads[i] = t; t.setName("ABFS-prefetch-" + i); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java index 
b15ea54646a63..f197032a5fd27 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; final class ReadBufferManagerV2 extends ReadBufferManager { @@ -214,7 +214,7 @@ public void testMimicFullUseAndAddFailedBuffer(final ReadBuffer buf) { private int count = 0; @Override public Thread newThread(Runnable r) { - return new HadoopThread(r, "ReadAheadV2-Thread-" + count++); + return new SubjectInheritingThread(r, "ReadAheadV2-Thread-" + count++); } }; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java index 3fa9f49edd2db..c008d64386bf8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.azure.integration.AzureTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Handle OOB IO into a shared container. @@ -75,7 +75,7 @@ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) { * Start writing blocks to Azure storage. 
*/ public void startWriting() { - runner = new HadoopThread(this); // Create the block writer thread. + runner = new SubjectInheritingThread(this); // Create the block writer thread. runner.start(); // Start the block writer thread. } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java index a1fcf43972f97..19080d031b6c9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java @@ -31,7 +31,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*; @@ -95,7 +95,7 @@ public void testMultiThreadedBlockBlobReadScenario() throws Throwable { Path testFilePath1 = new Path(base, "test1.dat"); Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new HadoopThread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -122,7 +122,7 @@ public void testMultiThreadBlockBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new HadoopThread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -143,7 +143,7 @@ public void testMultiThreadedPageBlobSetPermissionScenario() 
createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -162,7 +162,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -180,7 +180,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() public void testMultiThreadedPageBlobOpenScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { inputStream = fs.open(testPath); @@ -201,7 +201,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { @@ -220,7 +220,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); @@ -238,7 +238,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws 
Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new HadoopThread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); @@ -254,7 +254,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable { public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createTestFolder(createTestAccount(), testFolderPath); - Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -272,7 +272,7 @@ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable { createTestFolder( getPageBlobTestStorageAccount(), testFolderPath); - Thread t = new HadoopThread(new DeleteThread(fs, testFolderPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -294,7 +294,7 @@ public void testMultiThreadedPageBlobReadScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new HadoopThread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -319,7 +319,7 @@ public void testMultiThreadedPageBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new HadoopThread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java index 261ba5b57512e..fd2ba05a35ac4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -68,7 +68,7 @@ public void testMultipleRenameFileOperationsToSameDestination() for (int i = 0; i < 10; i++) { final int threadNumber = i; Path src = path("test" + threadNumber); - threads.add(new HadoopThread(() -> { + threads.add(new SubjectInheritingThread(() -> { try { latch.await(Long.MAX_VALUE, TimeUnit.SECONDS); } catch (InterruptedException e) { @@ -156,7 +156,7 @@ public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() // Acquire the lease on the file in a background thread final CountDownLatch leaseAttemptComplete = new CountDownLatch(1); final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { // Acquire the lease and then signal the main test thread. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java index 415a612f4bcb1..7730d5283d7c1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java @@ -41,7 +41,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; @@ -1644,9 +1644,9 @@ public void testLeaseAsDistributedLock() throws IllegalArgumentException, NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs; String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(LEASE_LOCK_FILE_KEY))); - Thread first = new HadoopThread(new LeaseLockAction("first-thread", fullKey)); + Thread first = new SubjectInheritingThread(new LeaseLockAction("first-thread", fullKey)); first.start(); - Thread second = new HadoopThread(new LeaseLockAction("second-thread", fullKey)); + Thread second = new SubjectInheritingThread(new LeaseLockAction("second-thread", fullKey)); second.start(); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java index 8e2feeb64448f..a5bd553839786 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -150,7 +150,7 @@ public void testMultiThreadedOperation() throws Exception { final ConcurrentLinkedQueue exceptionsEncountered = new ConcurrentLinkedQueue(); for (int i = 0; i < numThreads; i++) { final Path threadLocalFile = new Path("/myFile" + i); - threads[i] = new HadoopThread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java index efff0c79c6b62..2ecc2592b47be 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java @@ -24,7 +24,7 @@ import java.util.Date; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; public class TestBandwidthGaugeUpdater { @@ -57,7 +57,7 @@ public void testMultiThreaded() throws Exception { new BandwidthGaugeUpdater(instrumentation, 1000, true); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { - threads[i] = new HadoopThread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { updater.blockDownloaded(new Date(), 
new Date(), 10); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 3be5c555eb24d..557333ff0fd9a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -63,7 +63,7 @@ import org.apache.hadoop.fs.store.BlockUploadStatistics; import org.apache.hadoop.fs.store.DataBlocks; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; import static java.net.HttpURLConnection.HTTP_OK; @@ -1244,7 +1244,7 @@ public void testFlushSuccessWithConnectionResetOnResponseInvalidMd5() throws Exc out1.write(bytes1); //parallel flush call should lead to the first call failing because of md5 mismatch. 
- Thread parallelFlushThread = new HadoopThread(() -> { + Thread parallelFlushThread = new SubjectInheritingThread(() -> { try { out1.hsync(); } catch (IOException e) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index cb87293eb6784..87f8132d52c06 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -69,7 +69,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticAssertions; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.FunctionRaisingIOE; import static java.net.HttpURLConnection.HTTP_CLIENT_TIMEOUT; @@ -1012,7 +1012,7 @@ public void testParallelRenameForAtomicRenameShouldFail() throws Exception { .acquireLease(Mockito.anyString(), Mockito.anyInt(), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { while (!leaseAcquired.get()) {} try { fs.rename(src, dst); @@ -1062,7 +1062,7 @@ public void testAppendAtomicBlobDuringRename() throws Exception { return answer.callRealMethod(); }).when(client).copyBlob(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); - new HadoopThread(() -> { + new SubjectInheritingThread(() -> { while (!copyInProgress.get()) {} try { os.write(1); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java 
b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java index ba6141fa6a032..e80ad8aca0545 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -368,7 +368,7 @@ private List readLines(File file) throws IOException { return lines; } - private static final class StreamPrinter extends HadoopThread { + private static final class StreamPrinter extends SubjectInheritingThread { private final InputStream in; private final List lines; diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java index ed22fc7e875a0..b77bf5afb97ca 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.tools.util.WorkReport; import org.apache.hadoop.tools.util.WorkRequest; import org.apache.hadoop.tools.util.WorkRequestProcessor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -147,7 +147,7 @@ public void testMultipleProducerConsumerShutdown() // starts two thread: a source thread which put in work, and a sink thread // which takes a piece of work from 
ProducerConsumer - class SourceThread extends HadoopThread { + class SourceThread extends SubjectInheritingThread { public void work() { while (true) { try { @@ -162,7 +162,7 @@ public void work() { // The source thread put requests into producer-consumer. SourceThread source = new SourceThread(); source.start(); - class SinkThread extends HadoopThread { + class SinkThread extends SubjectInheritingThread { public void work() { try { while (true) { diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java index 63b376fd317bd..f06195fccfbd7 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java @@ -19,7 +19,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import java.io.IOException; import java.nio.ByteBuffer; @@ -550,7 +550,7 @@ public void onContainersAllocated(List allocatedContainers) { + container.getNodeHttpAddress() + ", containerResourceMemory=" + rsrc.getMemorySize() + ", containerResourceVirtualCores=" + rsrc.getVirtualCores()); - Thread launchThread = new HadoopThread(containerLauncher); + Thread launchThread = new SubjectInheritingThread(containerLauncher); // launch and start the container on a separate thread to keep // the main thread unblocked diff --git 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java index d969a4908d5b3..f6b2368dd2b76 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java @@ -77,7 +77,7 @@ import org.apache.hadoop.util.ClassUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; @@ -892,7 +892,7 @@ private boolean monitorInfraApplication() throws YarnException, IOException { boolean loggedApplicationInfo = false; boolean success = false; - Thread namenodeMonitoringThread = new HadoopThread(() -> { + Thread namenodeMonitoringThread = new SubjectInheritingThread(() -> { Supplier exitCritera = () -> Apps.isApplicationFinalState(infraAppState); Optional namenodeProperties = Optional.empty(); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java index 17742bbc6db7b..f3779a33c8c1c 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java @@ -52,7 
+52,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -320,7 +320,7 @@ static void waitForNameNodeReadiness(final Properties nameNodeProperties, .get(getNameNodeHdfsUri(nameNodeProperties), conf); log.info("Launching thread to trigger block reports for Datanodes with <" + blockThreshold + " blocks reported"); - Thread blockReportThread = new HadoopThread(() -> { + Thread blockReportThread = new SubjectInheritingThread(() -> { // Here we count both Missing and UnderReplicated within under // replicated long lastUnderRepBlocks = Long.MAX_VALUE; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index 19dcdd08f8fd6..7326edb7f7d2e 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -57,7 +57,7 @@ import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -462,7 +462,7 @@ private Client createAndStartClient(Configuration 
localConf) { final Client client = new Client(JarFinder.getJar(ApplicationMaster.class), JarFinder.getJar(Assertions.class)); client.setConf(localConf); - Thread appThread = new HadoopThread(() -> { + Thread appThread = new SubjectInheritingThread(() -> { try { client.run(new String[] {"-" + Client.MASTER_MEMORY_MB_ARG, "128", "-" + Client.CONF_PATH_ARG, confZip.toString(), diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java index 25bc2445f614a..fe8281e5da7b5 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java @@ -43,7 +43,7 @@ import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.REPLAYCOUNTERS; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.ReplayCommand; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +58,7 @@ * are inserted by the {@link AuditReplayMapper}. Once an item is ready, this * thread will fetch the command from the queue and attempt to replay it. 
*/ -public class AuditReplayThread extends HadoopThread { +public class AuditReplayThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(AuditReplayThread.class); diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index bf9a4c27916ee..6eabff12d006a 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -36,7 +36,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -310,7 +310,7 @@ public void setJournal(BalanceJournal journal) { /** * This thread consumes the delayQueue and move the jobs to the runningQueue. */ - class Rooster extends HadoopThread { + class Rooster extends SubjectInheritingThread { @Override public void work() { while (running.get()) { @@ -328,7 +328,7 @@ public void work() { /** * This thread consumes the runningQueue and give the job to the workers. */ - class Reader extends HadoopThread { + class Reader extends SubjectInheritingThread { @Override public void work() { while (running.get()) { @@ -362,7 +362,7 @@ public void work() { * This thread consumes the recoverQueue, recovers the job the adds it to the * runningQueue. 
*/ - class Recover extends HadoopThread { + class Recover extends SubjectInheritingThread { @Override public void work() { while (running.get()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index ce2778a6cd7a9..82fed9d568b6d 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.ZombieJobProducer; import org.slf4j.Logger; @@ -628,7 +628,7 @@ private int setupDistCacheEmulation(Configuration conf, String traceIn, * pipeline abort its progress, waiting for each to exit and killing * any jobs still running on the cluster. 
*/ - class Shutdown extends HadoopThread { + class Shutdown extends SubjectInheritingThread { static final long FAC_SLEEP = 1000; static final long SUB_SLEEP = 4000; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java index 5944d08dfd7f7..cb3192dca5bca 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java @@ -35,7 +35,7 @@ import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobStatus; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Component accepting submitted, running {@link Statistics.JobStats} and @@ -134,7 +134,7 @@ List getRemainingJobs() { * Monitoring thread pulling running jobs from the component and into * a queue to be polled for status. 
*/ - private class MonitorThread extends HadoopThread { + private class MonitorThread extends SubjectInheritingThread { public MonitorThread(int i) { super("GridmixJobMonitor-" + i); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java index cbafa9ca9df17..c9a5cdb4ceebd 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java @@ -41,7 +41,7 @@ import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.TaskInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import java.io.IOException; @@ -144,7 +144,7 @@ private void configure() { * This is a progress based resource usage matcher. 
*/ @SuppressWarnings("unchecked") - static class ResourceUsageMatcherRunner extends HadoopThread + static class ResourceUsageMatcherRunner extends SubjectInheritingThread implements Progressive { private final ResourceUsageMatcher matcher; private final BoostingProgress progress; @@ -235,7 +235,7 @@ void boost(float value) { // Makes sure that the TaskTracker doesn't kill the map/reduce tasks while // they are emulating - private static class StatusReporter extends HadoopThread { + private static class StatusReporter extends SubjectInheritingThread { private final TaskAttemptContext context; private final Progressive progress; diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java index 7c13e9e2c3665..60572a515fb77 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java @@ -22,7 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +65,7 @@ public Thread createReaderThread() { public void update(Statistics.ClusterStats item) { } - private class ReplayReaderThread extends HadoopThread { + private class ReplayReaderThread extends SubjectInheritingThread { public ReplayReaderThread(String threadName) { super(threadName); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java 
index d5201f9384206..5f61d7e32ab4f 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java @@ -22,7 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -60,7 +60,7 @@ public Thread createReaderThread() { return new SerialReaderThread("SerialJobFactory"); } - private class SerialReaderThread extends HadoopThread { + private class SerialReaderThread extends SubjectInheritingThread { public SerialReaderThread(String threadName) { super(threadName); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java index 4de17aee14517..9178fdb85cb51 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java @@ -28,7 +28,7 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -217,7 +217,7 @@ public void start() { statistics.start(); } - private class StatCollector extends HadoopThread { + private class StatCollector extends SubjectInheritingThread { StatCollector() { super("StatsCollectorThread"); 
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java index bf5ea483e25c0..6e9c04a47b522 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java @@ -30,7 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.io.IOException; import java.util.HashSet; @@ -137,7 +137,7 @@ public Thread createReaderThread() { * Worker thread responsible for reading descriptions, assigning sequence * numbers, and normalizing time. */ - private class StressReaderThread extends HadoopThread { + private class StressReaderThread extends SubjectInheritingThread { public StressReaderThread(String name) { super(name); diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java index 850e8ed9af0db..4d2646fb84c71 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java @@ -20,14 +20,14 @@ package org.apache.hadoop.resourceestimator.service; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Simple shutdown hook for {@link ResourceEstimatorServer}. 
*/ -public class ShutdownHook extends HadoopThread { +public class ShutdownHook extends SubjectInheritingThread { private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownHook.class); private final ResourceEstimatorServer server; diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java index 750b5dd371e97..fd4090fe85722 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java @@ -38,7 +38,7 @@ import org.apache.hadoop.streaming.io.TextOutputReader; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.io.Text; /** Shared functionality for PipeMapper, PipeReducer. 
@@ -366,7 +366,7 @@ OutputReader createOutputReader(Class outputReaderClass) } - class MROutputThread extends HadoopThread { + class MROutputThread extends SubjectInheritingThread { MROutputThread(OutputReader outReader, OutputCollector outCollector, Reporter reporter) { @@ -418,7 +418,7 @@ public void work() { } - class MRErrorThread extends HadoopThread { + class MRErrorThread extends SubjectInheritingThread { public MRErrorThread() { this.reporterPrefix = job_.get("stream.stderr.reporter.prefix", "reporter:"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index d745d187c3b96..dad3920951418 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -69,7 +69,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -1762,7 +1762,7 @@ Thread createLaunchContainerThread(Container allocatedContainer, LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer, containerListener, 
shellId); - return new HadoopThread(runnableLaunchContainer); + return new SubjectInheritingThread(runnableLaunchContainer); } private void publishContainerStartEventOnTimelineServiceV2( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java index 7fc42f73542f7..34d4273a089b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java @@ -53,7 +53,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -334,7 +334,7 @@ protected void baseTestDSShell(String methodName, boolean haveDomain, boolean de assertTrue(initSuccess); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new HadoopThread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { result.set(dsClient.run()); } catch (Exception e) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java index 3ae65b3d0a27f..62defa28bbfc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; @@ -70,7 +70,7 @@ protected Thread createLaunchContainerThread(Container allocatedContainer, threadsLaunched++; launchedContainers.add(allocatedContainer.getId()); yarnShellIds.add(shellId); - return new HadoopThread(); + return new SubjectInheritingThread(); } void setNumTotalContainers(int numTotalContainers) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java index 44018ad852e2b..f75808694b819 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java @@ -35,7 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -127,7 +127,7 @@ public void testDSShellWithEnforceExecutionType(TestInfo testInfo) throws Except try { setAndGetDSClient(new Configuration(getYarnClusterConfiguration())); getDSClient().init(args); - Thread dsClientRunner = new HadoopThread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { getDSClient().run(); } catch (Exception e) { @@ -221,7 +221,7 @@ private void doTestDistributedShellWithResources( assertTrue(getDSClient().init(args)); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread dsClientRunner = new HadoopThread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { result.set(getDSClient().run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java index b04bd4ed56a78..f38b504420807 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java @@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.YarnClient; @@ -321,7 +321,7 @@ public void testDistributedShellWithAllocationTagNamespace( new Client( new Configuration(distShellTest.getYarnClusterConfiguration())); dsClient.init(argsA); - Thread dsClientRunner = new HadoopThread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { dsClient.run(); } catch (Exception e) { @@ -456,7 +456,7 @@ private void waitForExpectedNMsCount(int[] expectedNMCounts, /** * Monitor containers running on NMs. 
*/ - class NMContainerMonitor extends HadoopThread { + class NMContainerMonitor extends SubjectInheritingThread { // The interval of milliseconds of sampling (500ms) private final static int SAMPLING_INTERVAL_MS = 500; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index 30e5162c20a37..7e7af2ccd1273 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -243,7 +243,7 @@ public void launchAM(ApplicationAttemptId attemptId) // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new HadoopThread() { + Thread errThread = new SubjectInheritingThread() { @Override public void work() { try { @@ -257,7 +257,7 @@ public void work() { } } }; - Thread outThread = new HadoopThread() { + Thread outThread = new SubjectInheritingThread() { 
@Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java index 613eb8a089424..fcb821736023e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.service.SystemServiceManager; @@ -128,7 +128,7 @@ protected void serviceStart() throws Exception { launchUserService(syncUserServices); // Create a thread and submit services in background otherwise it // block RM switch time. 
- serviceLaucher = new HadoopThread(createRunnable()); + serviceLaucher = new SubjectInheritingThread(createRunnable()); serviceLaucher.setName("System service launcher"); serviceLaucher.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index ebb766903c891..168d24fb36b0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -26,7 +26,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -155,7 +155,7 @@ public StopResponseProto stop(StopRequestProto requestProto) // Stop the service in 2 seconds delay to make sure this rpc call is completed. // shutdown hook will be executed which will stop AM gracefully. 
- Thread thread = new HadoopThread() { + Thread thread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java index 8ce29f098fff3..27440428838d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java @@ -19,7 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.registry.client.api.RegistryConstants; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.service.ServiceTestUtils; import org.apache.hadoop.yarn.service.api.records.Artifact; @@ -726,7 +726,7 @@ public void testNoServiceDependencies() { @Test public void testServiceDependencies() { - HadoopThread thread = new HadoopThread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override public void work() { Service service = createExampleApplication(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java index 89c01f9bf8d53..34097ab545b01 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.eclipse.jetty.websocket.api.Session; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect; @@ -86,7 +86,7 @@ public void onClose(Session session, int status, String reason) { public void run() { try { Reader consoleReader = new Reader(); - Thread inputThread = new HadoopThread(consoleReader, "consoleReader"); + Thread inputThread = new SubjectInheritingThread(consoleReader, "consoleReader"); inputThread.start(); while (mySession.isOpen()) { mySession.getRemote().flush(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 2fda78d5de474..7a45603634542 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.Container; @@ -294,7 +294,7 @@ public void updateTrackingUrl(String trackingUrl) { client.updateTrackingUrl(trackingUrl); } - private class HeartbeatThread extends HadoopThread { + private class HeartbeatThread extends SubjectInheritingThread { public HeartbeatThread() { super("AMRM Heartbeater thread"); } @@ -338,7 +338,7 @@ public void work() { } } - private class CallbackHandlerThread extends HadoopThread { + private class CallbackHandlerThread extends SubjectInheritingThread { public CallbackHandlerThread() { super("AMRM Callback Handler Thread"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java index e0e737017d777..91650f84e93df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java @@ -59,7 +59,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +74,7 @@ public class NMClientAsyncImpl extends NMClientAsync { protected ThreadPoolExecutor threadPool; protected int maxThreadPoolSize; - protected HadoopThread eventDispatcherThread; + protected SubjectInheritingThread 
eventDispatcherThread; protected AtomicBoolean stopped = new AtomicBoolean(false); protected BlockingQueue events = new LinkedBlockingQueue(); @@ -152,7 +152,7 @@ protected void serviceStart() throws Exception { threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventDispatcherThread = new HadoopThread() { + eventDispatcherThread = new SubjectInheritingThread() { @Override public void work() { ContainerEvent event = null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index a3a536ad46391..7ba4dfaa2cfd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -63,7 +63,7 @@ import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Priority; @@ -368,7 +368,7 @@ private static class QueueMetrics { long pendingContainers; } - private class KeyboardMonitor extends HadoopThread { + private class KeyboardMonitor extends SubjectInheritingThread { public void work() { Scanner keyboard = new Scanner(System.in, "UTF-8"); @@ -1230,7 +1230,7 @@ private String getCommandOutput(String[] command) throws IOException, private void addShutdownHook() { //clear screen when the program exits - Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { + 
Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { clearScreen(); })); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index df21a87017f76..4335b39f86f03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.concurrent.TimeoutException; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; @@ -245,7 +245,7 @@ protected void verifyClientConnection() throws InterruptedException { } protected Thread createAndStartFailoverThread() { - HadoopThread failoverThread = new HadoopThread() { + SubjectInheritingThread failoverThread = new SubjectInheritingThread() { public void work() { keepRunning = true; while (keepRunning) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java index 0a1a75ab66ee2..b7ab17485d8aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; @@ -161,7 +161,7 @@ private void testProxyProvider(boolean facadeFlushCache) throws Exception { .getSubClusters(any(GetSubClustersInfoRequest.class)); threadResponse = null; - Thread thread = new HadoopThread(new Runnable() { + Thread thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java index ec99c7c505f0b..01540c5843217 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java @@ -29,7 +29,7 @@ import org.apache.hadoop.service.ServiceStateChangeListener; import org.apache.hadoop.tools.GetGroupsTestBase; import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.jupiter.api.AfterAll; @@ -71,7 +71,7 @@ public void 
stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(conf); - new HadoopThread() { + new SubjectInheritingThread() { public void work() { resourceManager.start(); }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java index 2aabe51250eab..72cd5104ef323 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; @@ -108,7 +108,7 @@ private void validateActiveRM(YarnClient client) throws IOException { } private void makeRMActive(final MiniYARNCluster cluster, final int index) { - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { System.out.println("Transition rm" + index + " to active"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java index 4b64e68908a05..fc7cc9f2c8545 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java @@ -41,7 +41,7 @@ import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -403,7 +403,7 @@ public void testUncaughtExceptionHandlerWithHAEnabled() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new HadoopThread(new Runnable() { + final Thread testThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw rte; @@ -447,7 +447,7 @@ public void testUncaughtExceptionHandlerWithoutHA() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new HadoopThread(new Runnable() { + final Thread testThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw rte; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java index 925712988a0d6..03989ccd67b3d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java @@ -26,7 +26,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.service.ServiceStateChangeListener; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -98,7 +98,7 @@ public void stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(configuration); - new HadoopThread() { + new SubjectInheritingThread() { public void work() { resourceManager.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java index e8aac07520859..408ed7517a7a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java @@ -47,7 +47,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceOperations; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -783,7 +783,7 @@ public void testOutOfOrder() throws Exception { 
recordFactory.newRecordInstance(ContainerLaunchContext.class); // start container from another thread - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { asyncClient.startContainerAsync(container, clc); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 1cc1d3886a86a..3e00c1647c25d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -23,7 +23,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; @@ -292,7 +292,7 @@ public void testSubmitApplicationInterrupted(SchedulerType type) throws IOExcept client.start(); // Submit the application and then interrupt it while its waiting // for submission to be successful. 
- final class SubmitThread extends HadoopThread { + final class SubmitThread extends SubjectInheritingThread { private boolean isInterrupted = false; @Override public void work() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index d71b06a3f94af..7be22ad4d4ab6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -42,7 +42,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -219,7 +219,7 @@ protected void serviceInit(Configuration conf) throws Exception{ protected void serviceStart() throws Exception { //start all the components super.serviceStart(); - eventHandlingThread = new HadoopThread(createThread()); + eventHandlingThread = new SubjectInheritingThread(createThread()); eventHandlingThread.setName(dispatcherThreadName); eventHandlingThread.start(); } @@ -285,7 +285,7 @@ protected void dispatch(Event event) { && (ShutdownHookManager.get().isShutdownInProgress()) == false && stopped == false) { stopped = true; - Thread shutDownThread = new HadoopThread(createShutDownThread()); + Thread shutDownThread = new SubjectInheritingThread(createShutDownThread()); shutDownThread.setName("AsyncDispatcher ShutDown handler"); shutDownThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index e4a7024c8e47b..a5f756ecc845e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -28,7 +28,7 @@ import org.slf4j.MarkerFactory; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import java.util.concurrent.BlockingQueue; @@ -106,7 +106,7 @@ public void run() { public EventDispatcher(EventHandler handler, String name) { super(name); this.handler = handler; - this.eventProcessor = new HadoopThread(new EventProcessor()); + this.eventProcessor = new SubjectInheritingThread(new EventProcessor()); this.eventProcessor.setName(getName() + ":Event Processor"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java index 809aa8e34ff63..944baebfaaaa8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java @@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import 
org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * A simple liveliness monitor with which clients can register, trust the @@ -67,7 +67,7 @@ public AbstractLivelinessMonitor(String name) { protected void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; resetTimer(); - checkerThread = new HadoopThread(new PingChecker()); + checkerThread = new SubjectInheritingThread(new PingChecker()); checkerThread.setName("Ping Checker for "+getName()); checkerThread.start(); super.serviceStart(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java index aaf13ca12761e..d2d2f59825559 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java @@ -21,7 +21,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import static org.junit.jupiter.api.Assertions.assertSame; @@ -45,7 +45,7 @@ void testUncaughtExceptionHandlerWithRuntimeException() final YarnUncaughtExceptionHandler spyYarnHandler = spy(exHandler); final YarnRuntimeException yarnException = new YarnRuntimeException( "test-yarn-runtime-exception"); - final Thread yarnThread = new HadoopThread(new Runnable() { + final Thread yarnThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw yarnException; @@ -75,7 +75,7 
@@ void testUncaughtExceptionHandlerWithError() ExitUtil.disableSystemExit(); final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler); final java.lang.Error error = new java.lang.Error("test-error"); - final Thread errorThread = new HadoopThread(new Runnable() { + final Thread errorThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw error; @@ -104,7 +104,7 @@ void testUncaughtExceptionHandlerWithOutOfMemoryError() ExitUtil.disableSystemHalt(); final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler); final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error"); - final Thread oomThread = new HadoopThread(new Runnable() { + final Thread oomThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw oomError; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index 3028419bd61cb..9fffdf60df888 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -55,7 +55,7 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.TestContainerId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -175,7 +175,7 @@ private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long l final 
CountDownLatch latch = new CountDownLatch(1); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { public void work() { try { for (int i = 0; i < length / 3; i++) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java index d576c08df13b3..556598eb1950f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java @@ -47,7 +47,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.MemInfo; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.ProcessSmapMemoryInfo; @@ -76,7 +76,7 @@ public class TestProcfsBasedProcessTree { private static final int N = 6; // Controls the RogueTask - private class RogueTaskThread extends HadoopThread { + private class RogueTaskThread extends SubjectInheritingThread { public void work() { try { Vector args = new Vector(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index 0e1ca1be583a6..8089c0faceaed 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.commons.collections4.map.LRUMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -284,7 +284,7 @@ public StartAndInsertTime(long startTime, long insertTime) { } } - private class EntityDeletionThread extends HadoopThread { + private class EntityDeletionThread extends SubjectInheritingThread { private final long ttl; private final long ttlInterval; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index cd7b9cb77e75b..1d52a74c4ac91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.io.IOException; import java.util.ArrayList; @@ -390,7 +390,7 @@ protected void serviceStop() throws Exception { super.serviceStop(); } - private class EntityDeletionThread extends HadoopThread { + private class EntityDeletionThread extends SubjectInheritingThread { private final long ttl; private final long ttlInterval; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java index f2110da05eccf..0d6a4d188be03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java @@ -34,14 +34,14 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Extends Thread and provides an implementation that is used for processing the * AM heart beat request asynchronously and sending back the response using the * callback method registered with the system. 
*/ -public class AMHeartbeatRequestHandler extends HadoopThread { +public class AMHeartbeatRequestHandler extends SubjectInheritingThread { public static final Logger LOG = LoggerFactory.getLogger(AMHeartbeatRequestHandler.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java index f1a385d2b1452..ecad69c6fd333 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -37,7 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -106,7 +106,7 @@ protected void serviceStart() throws Exception { protected void serviceStop() throws Exception { if (!this.unmanagedAppMasterMap.isEmpty()) { - finishApplicationThread = new HadoopThread(createForceFinishApplicationThread()); + finishApplicationThread = new SubjectInheritingThread(createForceFinishApplicationThread()); finishApplicationThread.setName(dispatcherThreadName); finishApplicationThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java index 9e0a899d718f9..80b9c7d9b695e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java @@ -33,7 +33,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -229,7 +229,7 @@ public void testSlowRegisterCall() throws YarnException, IOException, InterruptedException { // Register with wait() in RM in a separate thread - Thread registerAMThread = new HadoopThread(new Runnable() { + Thread registerAMThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index ff941fcfe2112..ad34ccc329cfc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -68,7 +68,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT; @@ -852,7 +852,7 @@ public String getProcessId(ContainerId containerID) { * This class will signal a target container after a specified delay. 
* @see #signalContainer */ - public static class DelayedProcessKiller extends HadoopThread { + public static class DelayedProcessKiller extends SubjectInheritingThread { private final Container container; private final String user; private final String pid; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 4019efc3c7ca0..d109de3c79dab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -40,7 +40,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -535,7 +535,7 @@ public String getName() { } protected void shutDown(final int exitCode) { - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { @@ -560,7 +560,7 @@ protected void resyncWithRM() { // Some other thread is already created for resyncing, do nothing } else { // We have got the lock, create a new thread - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index b83fe4944a36f..bb5200c07458c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; @@ -150,7 +150,7 @@ protected void serviceStop() throws Exception { /** * Thread that monitors the resource utilization of this node. */ - private class MonitoringThread extends HadoopThread { + private class MonitoringThread extends SubjectInheritingThread { /** * Initialize the node resource monitoring thread. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index a12742e4eae90..e3b627ab80a61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -44,7 +44,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -330,7 +330,7 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { try { statusUpdater.join(); registerWithRM(); - statusUpdater = new HadoopThread(statusUpdaterRunnable, "Node Status Updater"); + statusUpdater = new SubjectInheritingThread(statusUpdaterRunnable, "Node Status Updater"); this.isStopped = false; statusUpdater.start(); LOG.info("NodeStatusUpdater thread is reRegistered and restarted"); @@ -829,7 +829,7 @@ private static Map parseCredentials( protected void startStatusUpdater() { statusUpdaterRunnable = new StatusUpdaterRunnable(); statusUpdater = - new HadoopThread(statusUpdaterRunnable, "Node Status Updater"); + new SubjectInheritingThread(statusUpdaterRunnable, "Node Status Updater"); statusUpdater.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index e5fcf92ae7e52..7658cc9ee0be2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -52,7 +52,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; @@ -498,7 +498,7 @@ public void validateResult() throws IOException { private Thread startStreamReader(final InputStream stream) throws IOException { - Thread streamReaderThread = new HadoopThread() { + Thread streamReaderThread = new SubjectInheritingThread() { @Override public void work() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 6c9a0e69583cf..9da951e0558f8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -48,7 +48,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -1750,7 +1750,7 @@ private void doRelaunch(final ContainerImpl container, container.sendRelaunchEvent(); } else { // wait for some time, then send launch event - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index f381b2a514a09..db486dfb4d0c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -23,7 +23,7 @@ import 
org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -56,7 +56,7 @@ * events of all the containers together, and if we go over the limit picks * a container to kill. The algorithm that picks the container is a plugin. */ -public class CGroupElasticMemoryController extends HadoopThread { +public class CGroupElasticMemoryController extends SubjectInheritingThread { protected static final Logger LOG = LoggerFactory .getLogger(CGroupElasticMemoryController.class); private final Clock clock = new MonotonicClock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 74cbc90124876..fcce814fd9895 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -80,7 +80,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -862,7 +862,7 @@ private static ExecutorService createLocalizerExecutor(Configuration conf) { } - class PublicLocalizer extends HadoopThread { + class PublicLocalizer extends SubjectInheritingThread { final FileContext lfs; final Configuration conf; @@ -1031,7 +1031,7 @@ public void work() { * access to user's credentials. One {@link LocalizerRunner} per localizerId. * */ - class LocalizerRunner extends HadoopThread { + class LocalizerRunner extends SubjectInheritingThread { final LocalizerContext context; final String localizerId; @@ -1406,7 +1406,7 @@ static String buildTokenFingerprint(Token tk) return fingerprint.toString(); } - static class CacheCleanup extends HadoopThread { + static class CacheCleanup extends SubjectInheritingThread { private final Dispatcher dispatcher; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 608801d5e3cc6..f4869c6f62a0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; @@ -490,7 +490,7 @@ boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree, curMemUsageOfAgedProcesses, limit); } - private class MonitoringThread extends HadoopThread { + private class MonitoringThread extends SubjectInheritingThread { MonitoringThread() { super("Container Monitor"); } @@ -885,7 +885,7 @@ private String formatUsageString(long currentVmemUsage, long vmemLimit, } } - private class LogMonitorThread extends HadoopThread { + private class LogMonitorThread extends SubjectInheritingThread { LogMonitorThread() { super("Container Log Monitor"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index 9d5ecfb27d8b4..bb114e50c9405 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java @@ -66,7 +66,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -463,7 +463,7 @@ public void testContainerKill() throws Exception { assumeTrue(shouldRun()); final ContainerId sleepId = getNextContainerId(); - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { public void work() { try { runAndBlock(sleepId, "sleep", "100"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index e6b6680de4a67..8bf27b1bdeeb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -48,7 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -745,7 +745,7 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { } } - class ContainerUpdateResourceThread extends HadoopThread { + class ContainerUpdateResourceThread extends 
SubjectInheritingThread { @Override public void work() { // Construct container resource increase request diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index dbcf61ec7e809..d663298d34c96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -69,7 +69,7 @@ import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1174,7 +1174,7 @@ protected NodeStatusUpdater createNodeStatusUpdater(Context context, assertTrue(lastService instanceof NodeStatusUpdater, "last service is NOT the node status updater"); - Thread starterThread = new HadoopThread(() -> { + Thread starterThread = new SubjectInheritingThread(() -> { try { nm.start(); } catch (Throwable e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java index 5c32c4ceccbdc..4b223f2f76a14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java @@ -74,7 +74,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -322,7 +322,7 @@ public void testMultipleLocalizers() throws Exception { FakeContainerLocalizer localizerB = testB.init(); // run localization - HadoopThread threadA = new HadoopThread() { + SubjectInheritingThread threadA = new SubjectInheritingThread() { @Override public void work() { try { @@ -332,7 +332,7 @@ public void work() { } } }; - HadoopThread threadB = new HadoopThread() { + SubjectInheritingThread threadB = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java index 
054494bfd8453..0f69fe9bded51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.util; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; @@ -115,7 +115,7 @@ public void testDeleteCgroup() throws Exception { fos.close(); final CountDownLatch latch = new CountDownLatch(1); - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { latch.countDown(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 4fda747e9c824..cdc815a686cd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -57,7 +57,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.VersionInfo; -import 
org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1136,7 +1136,7 @@ private class SchedulerEventDispatcher extends SchedulerEventDispatcher(String name, int samplesPerMin) { super(scheduler, name); this.eventProcessorMonitor = - new HadoopThread(new EventProcessorMonitor(getEventProcessorId(), + new SubjectInheritingThread(new EventProcessorMonitor(getEventProcessorId(), samplesPerMin)); this.eventProcessorMonitor .setName("ResourceManager Event Processor Monitor"); @@ -1221,7 +1221,7 @@ protected void serviceStop() throws Exception { */ private void handleTransitionToStandByInNewThread() { Thread standByTransitionThread = - new HadoopThread(activeServices.standByTransitionRunnable); + new SubjectInheritingThread(activeServices.standByTransitionRunnable); standByTransitionThread.setName("StandByTransitionThread"); standByTransitionThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 928dc74ff21ac..36436e892992e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit; import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -106,7 +106,7 @@ protected void serviceStop() throws Exception { launcherPool.shutdown(); } - private class LauncherThread extends HadoopThread { + private class LauncherThread extends SubjectInheritingThread { public LauncherThread() { super("ApplicationMaster Launcher"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java index 7f6424d3e50b4..e19d5a24048a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -31,7 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -598,7 +598,7 @@ public int incrementCurrentKeyId() { */ public void createCleanUpFinishApplicationThread(String stage) { 
String threadName = cleanUpThreadNamePrefix + "-" + stage; - Thread finishApplicationThread = new HadoopThread(createCleanUpFinishApplicationThread()); + Thread finishApplicationThread = new SubjectInheritingThread(createCleanUpFinishApplicationThread()); finishApplicationThread.setName(threadName); finishApplicationThread.start(); LOG.info("CleanUpFinishApplicationThread has been started {}.", threadName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java index 09e8e4c872126..9985e1997c8bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java @@ -29,7 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -537,7 +537,7 @@ public void handle(TimelineV1PublishEvent event) { } } - private class PutEventThread extends HadoopThread { + private class PutEventThread extends SubjectInheritingThread { PutEventThread() { super("PutEventThread"); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index e132b54c1d01d..89f7c1bfb246f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -27,7 +27,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.classification.VisibleForTesting; @@ -71,7 +71,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); t.setName(getName()); return t; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 241d16225982e..41797ac57e02b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -30,7 +30,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.ZKUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.curator.ZKCuratorManager.SafeTransaction; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -1469,7 +1469,7 @@ public void safeDeleteAndCheckNode(String path, List fencingACL, * Helper class that periodically attempts creating a znode to ensure that * this RM continues to be the Active. 
*/ - private class VerifyActiveStatusThread extends HadoopThread { + private class VerifyActiveStatusThread extends SubjectInheritingThread { VerifyActiveStatusThread() { super(VerifyActiveStatusThread.class.getName()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 0f6e7999a9e55..4fb423425b26f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -45,7 +45,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringInterner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1239,7 +1239,7 @@ public RMAppAttemptState transition(RMAppAttemptImpl appAttempt, private void retryFetchingAMContainer(final RMAppAttemptImpl appAttempt) { // start a new thread so that we are not blocking main dispatcher thread. 
- new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 7bfe8f93d77f9..885665ea72d62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -117,7 +117,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; @SuppressWarnings("unchecked") @@ -1717,7 +1717,7 @@ public void update() { * Thread which calls {@link #update()} every * updateInterval milliseconds. 
*/ - private class UpdateThread extends HadoopThread { + private class UpdateThread extends SubjectInheritingThread { @Override public void work() { while (!Thread.currentThread().isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index b9693e153bb65..facb0a08b762c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -21,7 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -296,7 +296,7 @@ private void dynamicallyUpdateAppActivitiesMaxQueueLengthIfNeeded() { @Override protected void serviceStart() throws Exception { - cleanUpThread = new HadoopThread(new Runnable() { + cleanUpThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 1f26e24abcb02..e36e032c2974a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -53,7 +53,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -639,7 +639,7 @@ public void setAsyncSchedulingConf(AsyncSchedulingConfiguration conf) { this.asyncSchedulingConf = conf; } - static class AsyncScheduleThread extends HadoopThread { + static class AsyncScheduleThread extends SubjectInheritingThread { private final CapacityScheduler cs; private AtomicBoolean runSchedules = new AtomicBoolean(false); @@ -692,7 +692,7 @@ public void suspendSchedule() { } - static class ResourceCommitterService extends HadoopThread { + static class ResourceCommitterService extends SubjectInheritingThread { private final CapacityScheduler cs; private BlockingQueue> backlogs = new LinkedBlockingQueue<>(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 85e707b2f3ea2..1ad1bf029b291 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -38,7 +38,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.XMLUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.security.Permission; @@ -119,7 +119,7 @@ public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); if (this.allocFile != null) { this.fs = allocFile.getFileSystem(conf); - reloadThread = new HadoopThread(() -> { + reloadThread = new SubjectInheritingThread(() -> { while (running) { try { synchronized (this) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java index 8047a96846690..48c0c981a7e19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java @@ -19,7 +19,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; @@ -40,7 +40,7 @@ /** * Thread that handles FairScheduler preemption. */ -class FSPreemptionThread extends HadoopThread { +class FSPreemptionThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory. 
getLogger(FSPreemptionThread.class); protected final FSContext context; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 6a15393d9cbfe..96e0b7944fc03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -100,7 +100,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.slf4j.Logger; @@ -316,7 +316,7 @@ public QueueManager getQueueManager() { * asynchronous to the node heartbeats. 
*/ @Deprecated - private class ContinuousSchedulingThread extends HadoopThread { + private class ContinuousSchedulingThread extends SubjectInheritingThread { @Override public void work() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java index d8251a384690f..4000e6de58b4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java @@ -34,7 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -100,7 +100,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new HadoopThread(r); + Thread t = new SubjectInheritingThread(r); t.setName(getName()); return t; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 271ae991746de..9b4cf0a6015fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -62,7 +62,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AbstractEvent; @@ -201,7 +201,7 @@ protected void serviceStart() throws Exception { dtCancelThread.start(); if (tokenKeepAliveEnabled) { delayedRemovalThread = - new HadoopThread(new DelayedTokenRemovalRunnable(getConfig()), + new SubjectInheritingThread(new DelayedTokenRemovalRunnable(getConfig()), "DelayedTokenCanceller"); delayedRemovalThread.start(); } @@ -348,7 +348,7 @@ public int hashCode() { } - private static class DelegationTokenCancelThread extends HadoopThread { + private static class DelegationTokenCancelThread extends SubjectInheritingThread { private static class TokenWithConf { Token token; Configuration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java 
index b39e2a6095171..e701000b14744 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java @@ -31,7 +31,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.Service.STATE; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -90,7 +90,7 @@ this.queueACLsManager, getRMContext() protected void doSecureLogin() throws IOException { } }; - new HadoopThread() { + new SubjectInheritingThread() { public void work() { resourceManager.start(); }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index 642f6d6a35dba..bf124e51d9b83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -55,7 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import 
org.apache.hadoop.service.Service.STATE; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -151,7 +151,7 @@ protected ClientRMService createClientRMService() { this.queueACLsManager, null); }; }; - new HadoopThread() { + new SubjectInheritingThread() { public void work() { UserGroupInformation.createUserForTesting(ENEMY, new String[] {}); UserGroupInformation.createUserForTesting(FRIEND, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index df56378637fe8..b3db045279a57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -71,7 +71,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Sets; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; @@ -1533,7 +1533,7 @@ public void handle(Event rawEvent) { rmService.init(new Configuration()); // submit 
an app and wait for it to block while in app submission - HadoopThread t = new HadoopThread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java index ccce2410cd438..cdd97efdbe7b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java @@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -190,7 +190,7 @@ public void testExpireCurrentZKSession() throws Exception{ public void testRMFailToTransitionToActive() throws Exception{ conf.set(YarnConfiguration.RM_HA_ID, "rm1"); final AtomicBoolean throwException = new AtomicBoolean(true); - HadoopThread launchRM = new HadoopThread() { + SubjectInheritingThread launchRM = new SubjectInheritingThread() { @Override public void work() { rm1 = new MockRM(conf, true) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index 7dc011cd1fcb1..5a27a563d4bc1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -24,7 +24,7 @@ import java.util.UUID; import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import static org.assertj.core.api.Assertions.assertThat; @@ -516,7 +516,7 @@ void stopActiveServices() { rm.adminService.transitionToActive(requestInfo); // 3. 
Try Transition to standby - Thread t = new HadoopThread(new Runnable() { + Thread t = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index 92da1f682080c..d83b862896f1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -415,7 +415,7 @@ public void testFSRMStateStoreClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); cluster.shutdownNameNodes(); - Thread clientThread = new HadoopThread(() -> { + Thread clientThread = new SubjectInheritingThread(() -> { try { store.storeApplicationStateInternal( ApplicationId.newInstance(100L, 1), diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java index 42dfa45ce06c5..627a6dc888575 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreTestBase.TestDispatcher; import org.apache.hadoop.util.ZKUtil; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -112,7 +112,7 @@ public void testZKClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); testingServer.stop(); - HadoopThread clientThread = new HadoopThread() { + SubjectInheritingThread clientThread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index bbe42d9d55f4c..e09706002fdb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -102,7 +102,7 @@ public void setUp() { resourceTrackerService.start(); } - private class ThirdNodeHeartBeatThread extends HadoopThread { + private class ThirdNodeHeartBeatThread extends SubjectInheritingThread { public void work() { int lastResponseID = 0; while (!stopT) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index 39dc7d823db15..f0e74a68a38d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -24,7 +24,7 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; import org.apache.hadoop.test.MetricsAsserts; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -818,7 +818,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { * simulate the concurrent calls for QueueMetrics#getQueueMetrics */ // thread A will keep querying the same queue metrics for a specified number of iterations - Thread threadA = new HadoopThread(() -> { + Thread threadA = new SubjectInheritingThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics qm = QueueMetrics.getQueueMetrics().get(queueName); @@ -834,7 +834,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { } }); // thread B will keep adding new queue metrics for a specified number of iterations - Thread threadB = new HadoopThread(() -> { + Thread threadB = new SubjectInheritingThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics.getQueueMetrics().put("q" + i, metrics); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index c074c0f8873f4..ed76a5cad9ede 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -111,7 +111,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -1065,7 +1065,7 @@ public ApplicationMasterProtocol run() { // grab the scheduler lock from another thread // and verify an allocate call in this thread doesn't block on it final CyclicBarrier barrier = new CyclicBarrier(2); - Thread otherThread = new HadoopThread(new Runnable() { + Thread otherThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { synchronized(cs) { @@ -3089,7 +3089,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The scheduler thread holds the queue's read-lock for 5 seconds // then the preemption's read-lock is used - Thread schedulerThread = new HadoopThread(() -> { + Thread schedulerThread = new SubjectInheritingThread(() -> { queue.readLock.lock(); try { Thread.sleep(5 * 1000); @@ -3102,7 +3102,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { }, "SCHEDULE"); // The complete thread locks/unlocks the queue's write-lock after 1 seconds - Thread completeThread = new HadoopThread(() -> { + Thread completeThread = new SubjectInheritingThread(() -> { try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -3116,7 
+3116,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The refresh thread holds the preemption's write-lock after 2 seconds // while it calls the getChildQueues(ByTryLock) that // locks(tryLocks) the queue's read-lock - Thread refreshThread = new HadoopThread(() -> { + Thread refreshThread = new SubjectInheritingThread(() -> { try { Thread.sleep(2 * 1000); } catch (InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java index 920795fd15111..f7df32734a072 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java @@ -31,7 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -802,7 +802,7 @@ public RMNodeLabelsManager createNodeLabelManager() { rm.close(); } - public static class NMHeartbeatThread extends HadoopThread { + public static class NMHeartbeatThread extends SubjectInheritingThread { private List mockNMS; private 
int interval; private volatile boolean shouldStop = false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index 743fac60e444b..7bfa3aaac6fc0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -39,7 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -422,7 +422,7 @@ public void testAllocateOfReservedContainerFromAnotherNode() .build()); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new HadoopThread() { + Thread t = new SubjectInheritingThread() { public void work() { try { MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java index dc098981a6583..3c105a93244ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java @@ -24,7 +24,7 @@ import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -172,7 +172,7 @@ public void testAllocateReservationFromOtherNode() throws Exception { // Launch AM in a thread and in parallel free the preempted node's // unallocated resources in main thread - Thread t1 = new HadoopThread() { + Thread t1 = new SubjectInheritingThread() { public void work() { try { MockAM am2 = MockRM.launchAM(app2, rm, nm1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index f14b2cbe10591..01326438e7414 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -62,7 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -1130,7 +1130,7 @@ public void testUserLimitCache() throws Exception { // Set up allocation threads Thread[] threads = new Thread[numAllocationThreads]; for (int i = 0; i < numAllocationThreads; i++) { - threads[i] = new HadoopThread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -4387,7 +4387,7 @@ public void testConcurrentAccess() throws Exception { final List conException = new ArrayList(); - Thread submitAndRemove = new HadoopThread(new Runnable() { + Thread submitAndRemove = new SubjectInheritingThread(new Runnable() { @Override public void run() { @@ -4406,7 +4406,7 @@ public void run() { } }, "SubmitAndRemoveApplicationAttempt Thread"); - Thread getAppsInQueue = new HadoopThread(new Runnable() { + Thread getAppsInQueue = new SubjectInheritingThread(new Runnable() { List apps = new ArrayList(); @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index cf9f02f331fd8..009f84b0a49b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -339,7 +339,7 @@ public void TestNodeAvailableResourceComparatorTransitivity() { } // To simulate unallocated resource changes - new HadoopThread() { + new SubjectInheritingThread() { @Override public void work() { for (int j = 0; j < 100; j++) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index fc6d9d646332c..653fcaee608e3 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -31,7 +31,7 @@ import java.util.TreeSet; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -485,7 +485,7 @@ public void testModWhileSorting(){ * Thread to simulate concurrent schedulable changes while sorting */ private Thread modificationThread(final List schedulableList) { - HadoopThread modThread = new HadoopThread() { + SubjectInheritingThread modThread = new SubjectInheritingThread() { @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 33e167d4aa759..13ba0bb67e4cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -74,7 +74,7 @@ 
import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -995,7 +995,7 @@ public Long answer(InvocationOnMock invocation) localDtr.init(conf); localDtr.start(); // submit a job that blocks during renewal - HadoopThread submitThread = new HadoopThread() { + SubjectInheritingThread submitThread = new SubjectInheritingThread() { @Override public void work() { localDtr.addApplicationAsync(mock(ApplicationId.class), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java index 324f65c582461..a027c87fb24c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java @@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -1736,7 +1736,7 @@ public void testSchedulerBulkActivities() throws Exception { } } - private class RESTClient extends HadoopThread { + private class RESTClient extends SubjectInheritingThread { private int expectedCount; private boolean done = false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java index 0fb77c2369a7c..fbd3b4efd0757 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java @@ -47,7 +47,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -203,7 +203,7 @@ protected void serviceStop() throws Exception { } protected void shutDown() { - new HadoopThread(Router.this::stop).start(); + new SubjectInheritingThread(Router.this::stop).start(); } protected RouterClientRMService createClientRMProxyService() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java index 13613a8fd2640..9017188871ff1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java @@ -29,7 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; @@ -227,7 +227,7 @@ public void testClientPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * ClientRequestInterceptor for the user. 
*/ - class ClientTestThread extends HadoopThread { + class ClientTestThread extends SubjectInheritingThread { private ClientRequestInterceptor interceptor; @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java index 15cdcad9f4f26..1430e98ed3bdd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java @@ -29,7 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; @@ -236,7 +236,7 @@ public void testRMAdminPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RMAdminRequestInterceptor for the user. 
*/ - class ClientTestThread extends HadoopThread { + class ClientTestThread extends SubjectInheritingThread { private RMAdminRequestInterceptor interceptor; @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java index 60f7bf8ac4a80..7ad5220d67514 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java @@ -31,7 +31,7 @@ import javax.ws.rs.core.Response; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; @@ -298,7 +298,7 @@ public void testWebPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RESTRequestInterceptor for the user. 
*/ - class ClientTestThread extends HadoopThread { + class ClientTestThread extends SubjectInheritingThread { private RESTRequestInterceptor interceptor; @Override public void work() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java index 1860bc1953ff7..4149439ecbb99 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java @@ -24,7 +24,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Sets; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.NoDocumentFoundException; @@ -245,7 +245,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { + Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { if (executorService != null) { 
executorService.shutdown(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java index 3e15671872c1a..0562ce64aa29b 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java @@ -35,7 +35,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopThread; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.CollectionType; @@ -280,7 +280,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new HadoopThread(() -> { + Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { if (executorService != null) { executorService.shutdown(); } From 2c48c438b244d1daa21647dfbcb71ec9bdac8e48 Mon Sep 17 00:00:00 2001 From: Istvan Toth Date: Thu, 28 Aug 2025 20:25:29 +0200 Subject: [PATCH 5/5] add tests 
to demonstrate JDK22+ Thread behaviour --- .../concurrent/TestSubjectPropagation.java | 89 ++++++++++++++++++- 1 file changed, 86 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java index 18dec16feb773..5e3e03fce58e8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/concurrent/TestSubjectPropagation.java @@ -19,6 +19,7 @@ package org.apache.hadoop.util.concurrent; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import java.util.concurrent.Callable; @@ -26,6 +27,7 @@ import org.apache.hadoop.security.authentication.util.SubjectUtil; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Shell; import org.junit.jupiter.api.Test; public class TestSubjectPropagation { @@ -33,13 +35,14 @@ public class TestSubjectPropagation { private Subject childSubject = null; @Test - public void testWork() { + public void testSubjectInheritingThreadOverride() { Subject parentSubject = new Subject(); childSubject = null; SubjectUtil.callAs(parentSubject, new Callable() { public Void call() throws InterruptedException { SubjectInheritingThread t = new SubjectInheritingThread() { + @Override public void work() { childSubject = SubjectUtil.current(); } @@ -54,7 +57,7 @@ public void work() { } @Test - public void testRunnable() { + public void testSubjectInheritingThreadRunnable() { Subject parentSubject = new Subject(); childSubject = null; @@ -78,13 +81,14 @@ public void run() { } @Test - public void testDeamonWork() { + public void testDaemonOverride() { Subject parentSubject = new Subject(); childSubject = null; 
SubjectUtil.callAs(parentSubject, new Callable() { public Void call() throws InterruptedException { Daemon t = new Daemon() { + @Override public void work() { childSubject = SubjectUtil.current(); } @@ -122,4 +126,83 @@ public void run() { assertEquals(parentSubject, childSubject); } + @Test + public void testThreadOverride() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + + Thread t = new Thread() { + @Override + public void run() { + childSubject = SubjectUtil.current(); + } + }; + t.start(); + t.join(1000); + return (Void) null; + } + }); + + boolean securityManagerEnabled = true; + try { + SecurityManager sm = System.getSecurityManager(); + System.setSecurityManager(sm); + } catch (UnsupportedOperationException e) { + // JDK24+ always throws this + securityManagerEnabled = false; + } catch (Throwable t) { + // don't care + } + + if (Shell.isJavaVersionAtLeast(22) && !securityManagerEnabled) { + // This is the behaviour that breaks Hadoop authorization + assertNull(childSubject); + } else { + assertEquals(parentSubject, childSubject); + } + } + + @Test + public void testThreadRunnable() { + Subject parentSubject = new Subject(); + childSubject = null; + + SubjectUtil.callAs(parentSubject, new Callable() { + public Void call() throws InterruptedException { + Runnable r = new Runnable() { + @Override + public void run() { + childSubject = SubjectUtil.current(); + } + }; + + Thread t = new Thread(r); + t.start(); + t.join(1000); + return (Void) null; + } + }); + + boolean securityManagerEnabled = true; + try { + SecurityManager sm = System.getSecurityManager(); + System.setSecurityManager(sm); + } catch (UnsupportedOperationException e) { + // JDK24+ always throws this + securityManagerEnabled = false; + } catch (Throwable t) { + // don't care + } + + if (Shell.isJavaVersionAtLeast(22) && !securityManagerEnabled) { + // This is the behaviour 
that breaks Hadoop authorization + assertNull(childSubject); + } else { + assertEquals(parentSubject, childSubject); + } + } + }