[SPARK-33285][CORE][SQL] Fix deprecated compilation warnings of "Auto-application to () is deprecated" in Scala 2.13 #30234
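Scala 2.13 deprecates "auto-application": calling a method that is declared with an empty parameter list, such as def size(): Int, without writing the parentheses at the call site. This PR adds the explicit () at the affected call sites. A minimal sketch of the warning and the fix, with hypothetical names used only for illustration:

// Scala 2.13 sketch (hypothetical names)
class Example {
  def size(): Int = 42      // declared with an empty parameter list
}

val e = new Example
val a = e.size              // deprecated in 2.13: auto-application of an empty-paren method
val b = e.size()            // fixed: explicit empty argument list

Methods declared without parentheses (def size: Int) are unaffected; the warning fires only when the declaration has () but the call site omits it.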

Closed · wants to merge 39 commits

Changes from 10 commits

Commits (39)
3fe5641
fix core module part 1
LuciferYang Oct 30, 2020
87f9b6e
fix core module part 2
LuciferYang Nov 2, 2020
abaa141
fix sql catalyst module part 1
LuciferYang Nov 2, 2020
181409d
fix sql catalyst module part 2
LuciferYang Nov 2, 2020
31b2d58
fix sql catalyst module part 3
LuciferYang Nov 3, 2020
62ea87d
fix sql core module part 1
LuciferYang Nov 3, 2020
38fbc9f
fix sql core module part 2
LuciferYang Nov 3, 2020
656ff41
fix hive module
LuciferYang Nov 3, 2020
eb84d5e
fix thriftserver module
LuciferYang Nov 3, 2020
dfe1a98
fix format
LuciferYang Nov 3, 2020
a8ac6bc
fix sql module
LuciferYang Nov 5, 2020
8881355
fix mllib
LuciferYang Nov 5, 2020
4a416a2
Revert "fix mllib"
LuciferYang Nov 6, 2020
03cde9c
Revert "fix sql module"
LuciferYang Nov 6, 2020
148eadc
Merge branch 'upmaster' into SPARK-33285.1
LuciferYang Nov 6, 2020
22ba82f
update core module
LuciferYang Nov 6, 2020
664b464
Merge branch 'upmaster' into SPARK-33285.1
LuciferYang Nov 16, 2020
760e519
fix Auto-application to () is deprecated compile warnings of mlli…
LuciferYang Nov 3, 2020
3d12196
fix Auto-application to () is deprecated compile warnings of grap…
LuciferYang Nov 16, 2020
312996f
fix Auto-application to () is deprecated compile warnings of stre…
LuciferYang Nov 2, 2020
93ecc5f
fix Auto-application to () is deprecated compile warnings of stre…
LuciferYang Nov 3, 2020
7b071da
fix Auto-application to () is deprecated compile warnings of mlli…
LuciferYang Nov 3, 2020
8a2526f
refix core module
LuciferYang Nov 16, 2020
678ee57
fix Auto-application to () is deprecated compile warnings of too…
LuciferYang Nov 16, 2020
2f37009
fix Auto-application to () is deprecated compile warnings of rep…
LuciferYang Nov 16, 2020
29030fd
fix Auto-application to () is deprecated compile warnings of avro…
LuciferYang Nov 17, 2020
71ccc3f
fix Auto-application to () is deprecated compile warnings of kine…
LuciferYang Nov 17, 2020
f20755a
fix Auto-application to () is deprecated compile warnings of kafk…
LuciferYang Nov 17, 2020
eb0a65e
fix Auto-application to () is deprecated compile warnings of reso…
LuciferYang Nov 17, 2020
0fb61a7
fix Auto-application to () is deprecated compile warnings of hado…
LuciferYang Nov 17, 2020
97df521
fix Auto-application to () is deprecated compile warnings of exam…
LuciferYang Nov 17, 2020
2082c3b
checking
LuciferYang Nov 18, 2020
38a6831
Merge branch 'upmaster' into SPARK-33285.1
LuciferYang Nov 18, 2020
9284fdc
fix left
LuciferYang Nov 18, 2020
d8f1db0
try to add lint-eta-zero compile arg
LuciferYang Nov 19, 2020
176d8fd
Merge branch 'upmaster' into SPARK-33285.1
LuciferYang Nov 19, 2020
1a2b123
change to use -Xlint:eta-zero
LuciferYang Nov 19, 2020
f953d87
try to suppress Auto-application
LuciferYang Dec 14, 2020
4253839
Merge branch 'upmaster' into SPARK-33285.1
LuciferYang Dec 14, 2020
@@ -57,7 +57,7 @@ private[spark] class BarrierCoordinator(
   private val listener = new SparkListener {
     override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
       val stageInfo = stageCompleted.stageInfo
-      val barrierId = ContextBarrierId(stageInfo.stageId, stageInfo.attemptNumber)
+      val barrierId = ContextBarrierId(stageInfo.stageId, stageInfo.attemptNumber())
       // Clear ContextBarrierState from a finished stage attempt.
       cleanupBarrierStage(barrierId)
     }
20 changes: 11 additions & 9 deletions core/src/main/scala/org/apache/spark/BarrierTaskContext.scala
@@ -61,15 +61,16 @@ class BarrierTaskContext private[spark] (
   private lazy val numTasks = getTaskInfos().size

   private def runBarrier(message: String, requestMethod: RequestMethod.Value): Array[String] = {
-    logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) has entered " +
-      s"the global sync, current barrier epoch is $barrierEpoch.")
+    logInfo(s"Task ${taskAttemptId()} from Stage ${stageId()}(Attempt ${stageAttemptNumber()})" +
+      s" has entered the global sync, current barrier epoch is $barrierEpoch.")
     logTrace("Current callSite: " + Utils.getCallSite())

     val startTime = System.currentTimeMillis()
     val timerTask = new TimerTask {
       override def run(): Unit = {
-        logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) waiting " +
-          s"under the global sync since $startTime, has been waiting for " +
+        logInfo(s"Task ${taskAttemptId()} from Stage ${stageId()}" +
+          s"(Attempt ${stageAttemptNumber()}) waiting under the global sync since $startTime, " +
+          "has been waiting for " +
           s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " +
           s"current barrier epoch is $barrierEpoch.")
       }
@@ -79,8 +80,8 @@ class BarrierTaskContext private[spark] (

     try {
       val abortableRpcFuture = barrierCoordinator.askAbortable[Array[String]](
-        message = RequestToSync(numTasks, stageId, stageAttemptNumber, taskAttemptId,
-          barrierEpoch, partitionId, message, requestMethod),
+        message = RequestToSync(numTasks, stageId(), stageAttemptNumber(), taskAttemptId(),
+          barrierEpoch, partitionId(), message, requestMethod),
         // Set a fixed timeout for RPC here, so users shall get a SparkException thrown by
         // BarrierCoordinator on timeout, instead of RPCTimeoutException from the RPC framework.
         timeout = new RpcTimeout(365.days, "barrierTimeout"))
@@ -107,15 +108,16 @@ class BarrierTaskContext private[spark] (
       val messages = abortableRpcFuture.future.value.get.get

       barrierEpoch += 1
-      logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) finished " +
+      logInfo(s"Task ${taskAttemptId()} from Stage ${stageId()}" +
+        s"(Attempt ${stageAttemptNumber()}) finished " +
         "global sync successfully, waited for " +
         s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " +
         s"current barrier epoch is $barrierEpoch.")
       messages
     } catch {
       case e: SparkException =>
-        logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) failed " +
-          "to perform global sync, waited for " +
+        logInfo(s"Task ${taskAttemptId()} from Stage ${stageId()}" +
+          s"(Attempt ${stageAttemptNumber()}) failed to perform global sync, waited for " +
           s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " +
           s"current barrier epoch is $barrierEpoch.")
         throw e
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/Heartbeater.scala
@@ -40,7 +40,7 @@ private[spark] class Heartbeater(
   /** Schedules a task to report a heartbeat. */
   def start(): Unit = {
     // Wait a random interval so the heartbeats don't end up in sync
-    val initialDelay = intervalMs + (math.random * intervalMs).asInstanceOf[Int]
+    val initialDelay = intervalMs + (math.random() * intervalMs).asInstanceOf[Int]

     val heartbeatTask = new Runnable() {
       override def run(): Unit = Utils.logUncaughtExceptions(reportHeartbeat())
4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/SecurityManager.scala
@@ -280,7 +280,7 @@ private[spark] class SecurityManager(
    * @return the secret key as a String if authentication is enabled, otherwise returns null
    */
   def getSecretKey(): String = {
-    if (isAuthenticationEnabled) {
+    if (isAuthenticationEnabled()) {
       val creds = UserGroupInformation.getCurrentUser().getCredentials()
       Option(creds.getSecretKey(SECRET_LOOKUP_KEY))
         .map { bytes => new String(bytes, UTF_8) }
@@ -372,7 +372,7 @@ private[spark] class SecurityManager(
       aclUsers: Set[String],
       aclGroups: Set[String]): Boolean = {
     if (user == null ||
-        !aclsEnabled ||
+        !aclsEnabled() ||
         aclUsers.contains(WILDCARD_ACL) ||
         aclUsers.contains(user) ||
         aclGroups.contains(WILDCARD_ACL)) {
12 changes: 6 additions & 6 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1491,7 +1491,7 @@ class SparkContext(config: SparkConf) extends Logging {
     require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
       "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
     val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
-    val callSite = getCallSite
+    val callSite = getCallSite()
     logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
     cleaner.foreach(_.registerBroadcastForCleanup(bc))
     bc
@@ -2121,7 +2121,7 @@
     if (stopped.get()) {
       throw new IllegalStateException("SparkContext has been shutdown")
     }
-    val callSite = getCallSite
+    val callSite = getCallSite()
     val cleanedFunc = clean(func)
     logInfo("Starting job: " + callSite.shortForm)
     if (conf.getBoolean("spark.logLineage", false)) {
@@ -2243,7 +2243,7 @@
       evaluator: ApproximateEvaluator[U, R],
       timeout: Long): PartialResult[R] = {
     assertNotStopped()
-    val callSite = getCallSite
+    val callSite = getCallSite()
     logInfo("Starting job: " + callSite.shortForm)
     val start = System.nanoTime
     val cleanedFunc = clean(func)
@@ -2273,7 +2273,7 @@
   {
     assertNotStopped()
     val cleanF = clean(processPartition)
-    val callSite = getCallSite
+    val callSite = getCallSite()
     val waiter = dagScheduler.submitJob(
       rdd,
       (context: TaskContext, iter: Iterator[T]) => cleanF(iter),
@@ -2424,7 +2424,7 @@
   /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
   def defaultParallelism: Int = {
     assertNotStopped()
-    taskScheduler.defaultParallelism
+    taskScheduler.defaultParallelism()
   }

   /**
@@ -2582,7 +2582,7 @@ object SparkContext extends Logging {
    * Throws an exception if a SparkContext is about to be created in executors.
    */
   private def assertOnDriver(): Unit = {
-    if (TaskContext.get != null) {
+    if (TaskContext.get() != null) {
       // we're accessing it during task execution, fail.
       throw new IllegalStateException(
         "SparkContext should only be created and accessed on the driver.")
20 changes: 10 additions & 10 deletions core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala
@@ -213,7 +213,7 @@ private[spark] abstract class BasePythonRunner[IN, OUT](

     /** Terminates the writer thread, ignoring any exceptions that may occur due to cleanup. */
     def shutdownOnTaskCompletion(): Unit = {
-      assert(context.isCompleted)
+      assert(context.isCompleted())
       this.interrupt()
     }

@@ -399,7 +399,7 @@
         dataOut.flush()
       } catch {
         case t: Throwable if (NonFatal(t) || t.isInstanceOf[Exception]) =>
-          if (context.isCompleted || context.isInterrupted) {
+          if (context.isCompleted() || context.isInterrupted()) {
             logDebug("Exception/NonFatal Error thrown after task completion (likely due to " +
               "cleanup)", t)
             if (!worker.isClosed) {
@@ -503,8 +503,8 @@
           init, finish))
       val memoryBytesSpilled = stream.readLong()
       val diskBytesSpilled = stream.readLong()
-      context.taskMetrics.incMemoryBytesSpilled(memoryBytesSpilled)
-      context.taskMetrics.incDiskBytesSpilled(diskBytesSpilled)
+      context.taskMetrics().incMemoryBytesSpilled(memoryBytesSpilled)
+      context.taskMetrics().incDiskBytesSpilled(diskBytesSpilled)
     }

     protected def handlePythonException(): PythonException = {
@@ -536,7 +536,7 @@
     }

     protected val handleException: PartialFunction[Throwable, OUT] = {
-      case e: Exception if context.isInterrupted =>
+      case e: Exception if context.isInterrupted() =>
         logDebug("Exception thrown after task interruption", e)
         throw new TaskKilledException(context.getKillReason().getOrElse("unknown reason"))

@@ -566,16 +566,16 @@
     override def run(): Unit = {
       // Kill the worker if it is interrupted, checking until task completion.
       // TODO: This has a race condition if interruption occurs, as completed may still become true.
-      while (!context.isInterrupted && !context.isCompleted) {
+      while (!context.isInterrupted() && !context.isCompleted()) {
         Thread.sleep(2000)
       }
-      if (!context.isCompleted) {
+      if (!context.isCompleted()) {
         Thread.sleep(taskKillTimeout)
-        if (!context.isCompleted) {
+        if (!context.isCompleted()) {
           try {
             // Mimic the task name used in `Executor` to help the user find out the task to blame.
-            val taskName = s"${context.partitionId}.${context.attemptNumber} " +
-              s"in stage ${context.stageId} (TID ${context.taskAttemptId})"
+            val taskName = s"${context.partitionId()}.${context.attemptNumber()} " +
+              s"in stage ${context.stageId()} (TID ${context.taskAttemptId()})"
             logWarning(s"Incomplete task $taskName interrupted: Attempting to kill Python Worker")
             env.destroyPythonWorker(pythonExec, envVars.asScala.toMap, worker)
           } catch {
@@ -236,7 +236,7 @@ private[spark] class BufferedStreamThread(
   val lines = new Array[String](errBufferSize)
   var lineIdx = 0
   override def run(): Unit = {
-    for (line <- Source.fromInputStream(in).getLines) {
+    for (line <- Source.fromInputStream(in).getLines()) {
       synchronized {
         lines(lineIdx) = line
         lineIdx = (lineIdx + 1) % errBufferSize
@@ -30,7 +30,7 @@ private[deploy] object JsonProtocol {

   private def writeResourcesInfo(info: Map[String, ResourceInformation]): JObject = {
     val jsonFields = info.map {
-      case (k, v) => JField(k, v.toJson)
+      case (k, v) => JField(k, v.toJson())
     }
     JObject(jsonFields.toList)
   }
@@ -615,7 +615,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
     stream.flush()

     // Get the output and discard any unnecessary lines from it.
-    Source.fromString(new String(out.toByteArray(), StandardCharsets.UTF_8)).getLines
+    Source.fromString(new String(out.toByteArray(), StandardCharsets.UTF_8)).getLines()
       .filter { line =>
         !line.startsWith("log4j") && !line.startsWith("usage")
       }
@@ -225,7 +225,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
     if (!Utils.isTesting) {
       ThreadUtils.newDaemonFixedThreadPool(NUM_PROCESSING_THREADS, "log-replay-executor")
     } else {
-      ThreadUtils.sameThreadExecutorService
+      ThreadUtils.sameThreadExecutorService()
     }
   }

@@ -73,7 +73,7 @@ private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("")
     } else if (eventLogsUnderProcessCount > 0) {
       <h4>No completed applications found!</h4>
     } else {
-      <h4>No completed applications found!</h4> ++ parent.emptyListingHtml
+      <h4>No completed applications found!</h4> ++ parent.emptyListingHtml()
     }
   }

@@ -87,8 +87,8 @@ object CommandUtils extends Logging {
     }

     // set auth secret to env variable if needed
-    if (securityMgr.isAuthenticationEnabled) {
-      newEnvironment += (SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey)
+    if (securityMgr.isAuthenticationEnabled()) {
+      newEnvironment += (SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey())
     }

     Command(
@@ -102,7 +102,7 @@ private[deploy] class Worker(
   private val FUZZ_MULTIPLIER_INTERVAL_LOWER_BOUND = 0.500
   private val REGISTRATION_RETRY_FUZZ_MULTIPLIER = {
     val randomNumberGenerator = new Random(UUID.randomUUID.getMostSignificantBits)
-    randomNumberGenerator.nextDouble + FUZZ_MULTIPLIER_INTERVAL_LOWER_BOUND
+    randomNumberGenerator.nextDouble() + FUZZ_MULTIPLIER_INTERVAL_LOWER_BOUND
   }
   private val INITIAL_REGISTRATION_RETRY_INTERVAL_SECONDS = (math.round(10 *
     REGISTRATION_RETRY_FUZZ_MULTIPLIER))
@@ -171,7 +171,7 @@ private[spark] class ProcfsMetricsGetter(procfsDir: String = "/proc/") extends L
       val f = new File(new File(procfsDir, pid.toString), procfsStatFile)
       new BufferedReader(new InputStreamReader(new FileInputStream(f), UTF_8))
     }
-    Utils.tryWithResource(openReader) { in =>
+    Utils.tryWithResource(openReader()) { in =>
       val procInfo = in.readLine
       val procInfoSplit = procInfo.split(" ")
       val vmem = procInfoSplit(22).toLong
@@ -207,7 +207,7 @@
     if (!isAvailable) {
       return ProcfsMetrics(0, 0, 0, 0, 0, 0)
     }
-    val pids = computeProcessTree
+    val pids = computeProcessTree()
     var allMetrics = ProcfsMetrics(0, 0, 0, 0, 0, 0)
     for (p <- pids) {
       allMetrics = addProcfsMetricsFromOneProcess(allMetrics, p)
@@ -100,7 +100,7 @@ private[spark] abstract class StreamBasedRecordReader[T](
     if (!processed) {
       val fileIn = new PortableDataStream(split, context, index)
       value = parseStream(fileIn)
-      key = fileIn.getPath
+      key = fileIn.getPath()
       processed = true
       true
     } else {
@@ -78,14 +78,14 @@ object SparkHadoopWriter extends Logging {
     val ret = sparkContext.runJob(rdd, (context: TaskContext, iter: Iterator[(K, V)]) => {
       // SPARK-24552: Generate a unique "attempt ID" based on the stage and task attempt numbers.
       // Assumes that there won't be more than Short.MaxValue attempts, at least not concurrently.
-      val attemptId = (context.stageAttemptNumber << 16) | context.attemptNumber
+      val attemptId = (context.stageAttemptNumber() << 16) | context.attemptNumber()

       executeTask(
         context = context,
         config = config,
         jobTrackerId = jobTrackerId,
         commitJobId = commitJobId,
-        sparkPartitionId = context.partitionId,
+        sparkPartitionId = context.partitionId(),
         sparkAttemptNumber = attemptId,
         committer = committer,
         iterator = iter)
@@ -67,13 +67,13 @@ private[spark] abstract class LauncherBackend {
   }

   def setAppId(appId: String): Unit = {
-    if (connection != null && isConnected) {
+    if (connection != null && isConnected()) {
       connection.send(new SetAppId(appId))
     }
   }

   def setState(state: SparkAppHandle.State): Unit = {
-    if (connection != null && isConnected && lastState != state) {
+    if (connection != null && isConnected() && lastState != state) {
       connection.send(new SetState(state))
       lastState = state
     }
@@ -145,7 +145,7 @@ private[spark] class UnifiedMemoryManager(
     }

     executionPool.acquireMemory(
-      numBytes, taskAttemptId, maybeGrowExecutionPool, () => computeMaxExecutionPoolSize)
+      numBytes, taskAttemptId, maybeGrowExecutionPool, () => computeMaxExecutionPoolSize())
   }

   override def acquireStorageMemory(
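
The UnifiedMemoryManager hunk above is a slightly different case: the method is already wrapped in an explicit () => ... function value, but the call inside the lambda body still relies on auto-application, so it needs the explicit () too. A minimal sketch with hypothetical names:

// sketch (hypothetical names)
def poolSize(): Long = 128L              // nullary method declared with ()
val f: () => Long = () => poolSize()     // explicit application inside the lambda body
val g: () => Long = poolSize _           // alternative: explicit eta-expansion of the method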
@@ -104,12 +104,12 @@ private[spark] class MetricsSystem private (
       registerSources()
     }
     registerSinks()
-    sinks.foreach(_.start)
+    sinks.foreach(_.start())
   }

   def stop(): Unit = {
     if (running) {
-      sinks.foreach(_.stop)
+      sinks.foreach(_.stop())
       registry.removeMatching((_: String, _: Metric) => true)
     } else {
       logWarning("Stopping a MetricsSystem that is not running")
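
The MetricsSystem change shows the same warning inside a placeholder lambda: _.start expands to x => x.start, and the body x.start auto-applies the empty-paren method. A minimal sketch with hypothetical names:

// sketch (hypothetical names)
trait Sink {
  def start(): Unit
  def stop(): Unit
}

def startAll(sinks: Seq[Sink]): Unit =
  sinks.foreach(_.start())   // _.start alone would trigger the deprecation under Scala 2.13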
@@ -65,7 +65,7 @@ class AsyncRDDActions[T: ClassTag](self: RDD[T]) extends Serializable with Loggi
    * Returns a future for retrieving the first num elements of the RDD.
    */
   def takeAsync(num: Int): FutureAction[Seq[T]] = self.withScope {
-    val callSite = self.context.getCallSite
+    val callSite = self.context.getCallSite()
     val localProperties = self.context.getLocalProperties
     // Cached thread pool to handle aggregation of subtasks.
     implicit val executionContext = AsyncRDDActions.futureExecutionContext
2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -279,7 +279,7 @@ class HadoopRDD[K, V](
       private val inputFormat = getInputFormat(jobConf)
       HadoopRDD.addLocalConfiguration(
         new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(createTime),
-        context.stageId, theSplit.index, context.attemptNumber, jobConf)
+        context.stageId(), theSplit.index, context.attemptNumber(), jobConf)

       reader =
         try {
4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/rdd/PipedRDD.scala
@@ -116,7 +116,7 @@ private[spark] class PipedRDD[T: ClassTag](
       override def run(): Unit = {
         val err = proc.getErrorStream
         try {
-          for (line <- Source.fromInputStream(err)(encoding).getLines) {
+          for (line <- Source.fromInputStream(err)(encoding).getLines()) {
             // scalastyle:off println
             System.err.println(line)
             // scalastyle:on println
@@ -180,7 +180,7 @@
     }

     // Return an iterator that read lines from the process's stdout
-    val lines = Source.fromInputStream(proc.getInputStream)(encoding).getLines
+    val lines = Source.fromInputStream(proc.getInputStream)(encoding).getLines()
     new Iterator[String] {
       def next(): String = {
         if (!hasNext()) {