diff --git a/frontend/server/src/main/java/org/pytorch/serve/wlm/WorkerThread.java b/frontend/server/src/main/java/org/pytorch/serve/wlm/WorkerThread.java
index fd16698d16..793a2c1d1b 100644
--- a/frontend/server/src/main/java/org/pytorch/serve/wlm/WorkerThread.java
+++ b/frontend/server/src/main/java/org/pytorch/serve/wlm/WorkerThread.java
@@ -80,7 +80,6 @@ public class WorkerThread implements Runnable {
     private WorkerLifeCycle lifeCycle;
     private int responseTimeout;
     private long recoveryStartTS; // 0: default value. no recovery needed, in healthy mode
-    private String telemetryPath = System.getenv("SM_TELEMETRY_LOG_REV_2022_12");
 
     public WorkerThread(
             ConfigManager configManager,
@@ -179,9 +178,7 @@ public void run() {
         currentThread.set(thread);
         BaseModelRequest req = null;
         int status = HttpURLConnection.HTTP_INTERNAL_ERROR;
-        if (telemetryPath != null) {
-            loggerTelemetryMetrics.info("Telemetry enabled.");
-        }
+
 
         try {
             connect();
@@ -286,7 +283,7 @@ public void run() {
         } catch (OutOfMemoryError oom) {
             logger.error("Out of memory error when creating workers", oom);
             status = HttpURLConnection.HTTP_ENTITY_TOO_LARGE;
-            if (telemetryPath != null) {
+            if (java.lang.System.getenv("SM_TELEMETRY_LOG") != null) {
                 loggerTelemetryMetrics.info(
                         "ModelServerError.Count:1|#TorchServe:{},{}:-1",
                         ConfigManager.getInstance().getVersion(),
@@ -294,7 +291,7 @@ public void run() {
         } catch (Throwable t) {
             logger.warn("Backend worker thread exception.", t);
-            if (telemetryPath != null) {
+            if (java.lang.System.getenv("SM_TELEMETRY_LOG") != null) {
                 loggerTelemetryMetrics.info(
                         "ModelServerError.Count:1|#TorchServe:{},{}:-1",
                         ConfigManager.getInstance().getVersion(),
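
For context, a minimal self-contained sketch of the pattern the change adopts: the `SM_TELEMETRY_LOG` environment variable is checked at the moment a failure is recorded, instead of being cached in a field (the old `telemetryPath`) when the worker is constructed. The class and method names below (`TelemetrySketch`, `reportError`, `emitMetric`) are hypothetical illustrations, not part of `WorkerThread`.

```java
// Sketch of per-call environment gating for telemetry, assuming the
// SM_TELEMETRY_LOG variable from the diff above. Names here are invented.
public final class TelemetrySketch {

    // Hypothetical stand-in for the loggerTelemetryMetrics logger in WorkerThread.
    private static void emitMetric(String metric) {
        System.out.println(metric);
    }

    // Emit the error metric only if SM_TELEMETRY_LOG is set at call time,
    // so the gate reflects the current environment rather than a value
    // captured once when the worker object was created.
    static void reportError(String version, String modelName) {
        if (System.getenv("SM_TELEMETRY_LOG") != null) {
            emitMetric("ModelServerError.Count:1|#TorchServe:" + version + "," + modelName + ":-1");
        }
    }

    public static void main(String[] args) {
        reportError("0.9.0", "my_model"); // prints only when SM_TELEMETRY_LOG is set
    }
}
```

Reading the variable per call keeps every error path gated on the same, current setting and retires the stale `SM_TELEMETRY_LOG_REV_2022_12` field, at the negligible cost of a `getenv` lookup on an already-exceptional path.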