From c69deed50e81cc1805f6f82ebb10513a211cbbe2 Mon Sep 17 00:00:00 2001
From: Adam <103067949+AdamL-Microsoft@users.noreply.github.com>
Date: Tue, 29 Aug 2023 12:57:19 -0700
Subject: [PATCH] Release 8.7.1 (hotfix) (#3459)

* Remove the retention policy setting (#3452)

---------

Co-authored-by: Cheick Keita
---
 .devcontainer/devcontainer.json | 3 +-
 .github/workflows/ci.yml | 2 -
 CHANGELOG.md | 6 +
 CURRENT_VERSION | 2 +-
 .../ApiService/Functions/QueueJobResult.cs | 60 -------
 .../ApiService/OneFuzzTypes/Model.cs | 45 -----
 src/ApiService/ApiService/Program.cs | 1 -
 .../ApiService/onefuzzlib/Config.cs | 1 -
 .../ApiService/onefuzzlib/Extension.cs | 44 +++--
 .../onefuzzlib/JobResultOperations.cs | 121 -------------
 .../ApiService/onefuzzlib/OnefuzzContext.cs | 2 -
 .../IntegrationTests/Fakes/TestContext.cs | 3 -
 src/agent/Cargo.lock | 16 --
 src/agent/Cargo.toml | 1 -
 src/agent/onefuzz-agent/src/config.rs | 12 --
 src/agent/onefuzz-agent/src/log_uploader.rs | 29 ++++
 src/agent/onefuzz-agent/src/work.rs | 5 +-
 src/agent/onefuzz-result/Cargo.toml | 18 --
 src/agent/onefuzz-result/src/job_result.rs | 129 --------------
 src/agent/onefuzz-result/src/lib.rs | 4 -
 src/agent/onefuzz-task/Cargo.toml | 1 -
 src/agent/onefuzz-task/src/local/cmd.rs | 42 ++++-
 src/agent/onefuzz-task/src/local/common.rs | 26 ++-
 .../example_templates/libfuzzer_basic.yml | 34 ++--
 .../src/local/generic_analysis.rs | 137 ++++++++++++++-
 .../src/local/generic_crash_report.rs | 138 ++++++++++++++-
 .../src/local/generic_generator.rs | 142 ++++++++++++++-
 src/agent/onefuzz-task/src/local/libfuzzer.rs | 161 +++++++++++++++++-
 .../src/local/libfuzzer_crash_report.rs | 128 +++++++++++++-
 .../onefuzz-task/src/local/libfuzzer_merge.rs | 84 ++++++++-
 .../src/local/libfuzzer_regression.rs | 134 ++++++++++++++-
 .../src/local/libfuzzer_test_input.rs | 83 +++++++++
 src/agent/onefuzz-task/src/local/mod.rs | 1 +
 src/agent/onefuzz-task/src/local/radamsa.rs | 78 +++++++++
 src/agent/onefuzz-task/src/local/schema.json | 8 +-
 src/agent/onefuzz-task/src/local/template.rs | 13 +-
 .../onefuzz-task/src/local/test_input.rs | 86 ++++++++++
 .../src/tasks/analysis/generic.rs | 5 +-
 src/agent/onefuzz-task/src/tasks/config.rs | 20 ---
 .../src/tasks/coverage/generic.rs | 19 +--
 .../onefuzz-task/src/tasks/fuzz/generator.rs | 7 +-
 .../src/tasks/fuzz/libfuzzer/common.rs | 49 ++----
 .../onefuzz-task/src/tasks/fuzz/supervisor.rs | 15 +-
 src/agent/onefuzz-task/src/tasks/heartbeat.rs | 2 +-
 .../onefuzz-task/src/tasks/merge/generic.rs | 2 +-
 .../src/tasks/merge/libfuzzer_merge.rs | 2 +-
 .../src/tasks/regression/common.rs | 15 +-
 .../src/tasks/regression/generic.rs | 3 +-
 .../src/tasks/regression/libfuzzer.rs | 3 +-
 .../src/tasks/report/crash_report.rs | 45 +----
 .../src/tasks/report/dotnet/generic.rs | 22 +--
 .../onefuzz-task/src/tasks/report/generic.rs | 14 +-
 .../src/tasks/report/libfuzzer_report.rs | 5 -
 src/agent/onefuzz/Cargo.toml | 1 -
 src/agent/onefuzz/src/blob/url.rs | 23 +--
 src/agent/onefuzz/src/syncdir.rs | 66 +------
 .../bicep-templates/storageAccounts.bicep | 2 +-
 src/integration-tests/integration-test.py | 77 ++------
 src/runtime-tools/linux/setup.sh | 64 ++-----
 59 files changed, 1389 insertions(+), 872 deletions(-)
 delete mode 100644 src/ApiService/ApiService/Functions/QueueJobResult.cs
 delete mode 100644 src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs
 delete mode 100644 src/agent/onefuzz-result/Cargo.toml
 delete mode 100644 src/agent/onefuzz-result/src/job_result.rs
 delete mode 100644 src/agent/onefuzz-result/src/lib.rs
 create mode 100644
src/agent/onefuzz-task/src/local/radamsa.rs mode change 100644 => 100755 src/runtime-tools/linux/setup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d3fcf050ed..4059b3d7c1 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,7 +13,6 @@ "**/target/**": true }, "lldb.executable": "/usr/bin/lldb", - "dotnet.server.useOmnisharp": true, "omnisharp.enableEditorConfigSupport": true, "omnisharp.enableRoslynAnalyzers": true, "python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python", @@ -49,4 +48,4 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {} } -} \ No newline at end of file +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2dd85d7c92..12824fd182 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -542,11 +542,9 @@ jobs: mkdir -p artifacts/linux-libfuzzer mkdir -p artifacts/linux-libfuzzer-with-options - mkdir -p artifacts/mariner-libfuzzer (cd libfuzzer ; make ) cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer-with-options - cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer mkdir -p artifacts/linux-libfuzzer-regression (cd libfuzzer-regression ; make ) diff --git a/CHANGELOG.md b/CHANGELOG.md index be4779ad77..8d46ea2a0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 8.7.1 + +### Fixed + +* Service: Removed deprecated Azure retention policy setting that was causing scaleset deployment errors [#3452](https://github.com/microsoft/onefuzz/pull/3452) + ## 8.7.0 ### Added diff --git a/CURRENT_VERSION b/CURRENT_VERSION index c0bcaebe8f..efeecbe2c5 100644 --- a/CURRENT_VERSION +++ b/CURRENT_VERSION @@ -1 +1 @@ -8.7.0 \ No newline at end of file +8.7.1 \ No newline at end of file diff --git a/src/ApiService/ApiService/Functions/QueueJobResult.cs b/src/ApiService/ApiService/Functions/QueueJobResult.cs deleted file mode 100644 index d781a4d1e1..0000000000 --- a/src/ApiService/ApiService/Functions/QueueJobResult.cs +++ /dev/null @@ -1,60 +0,0 @@ -using System.Text.Json; -using Microsoft.Azure.Functions.Worker; -using Microsoft.Extensions.Logging; -using Microsoft.OneFuzz.Service.OneFuzzLib.Orm; -namespace Microsoft.OneFuzz.Service.Functions; - - -public class QueueJobResult { - private readonly ILogger _log; - private readonly IOnefuzzContext _context; - - public QueueJobResult(ILogger logTracer, IOnefuzzContext context) { - _log = logTracer; - _context = context; - } - - [Function("QueueJobResult")] - public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) { - - var _tasks = _context.TaskOperations; - var _jobs = _context.JobOperations; - - _log.LogInformation("job result: {msg}", msg); - var jr = JsonSerializer.Deserialize(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}"); - - var task = await _tasks.GetByTaskId(jr.TaskId); - if (task == null) { - _log.LogWarning("invalid {TaskId}", jr.TaskId); - return; - } - - var job = await _jobs.Get(task.JobId); - if (job == null) { - _log.LogWarning("invalid {JobId}", task.JobId); - return; - } - - JobResultData? 
data = jr.Data; - if (data == null) { - _log.LogWarning($"job result data is empty, throwing out: {jr}"); - return; - } - - var jobResultType = data.Type; - _log.LogInformation($"job result data type: {jobResultType}"); - - Dictionary value; - if (jr.Value.Count > 0) { - value = jr.Value; - } else { - _log.LogWarning($"job result data is empty, throwing out: {jr}"); - return; - } - - var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value); - if (!jobResult.IsOk) { - _log.LogError("failed to create or update with job result {JobId}", job.JobId); - } - } -} diff --git a/src/ApiService/ApiService/OneFuzzTypes/Model.cs b/src/ApiService/ApiService/OneFuzzTypes/Model.cs index b839f52ddc..e430c1448c 100644 --- a/src/ApiService/ApiService/OneFuzzTypes/Model.cs +++ b/src/ApiService/ApiService/OneFuzzTypes/Model.cs @@ -33,19 +33,6 @@ public enum HeartbeatType { TaskAlive, } -[SkipRename] -public enum JobResultType { - NewCrashingInput, - NoReproCrashingInput, - NewReport, - NewUniqueReport, - NewRegressionReport, - NewCoverage, - NewCrashDump, - CoverageData, - RuntimeStats, -} - public record HeartbeatData(HeartbeatType Type); public record TaskHeartbeatEntry( @@ -54,16 +41,6 @@ public record TaskHeartbeatEntry( Guid MachineId, HeartbeatData[] Data); -public record JobResultData(JobResultType Type); - -public record TaskJobResultEntry( - Guid TaskId, - Guid? JobId, - Guid MachineId, - JobResultData Data, - Dictionary Value - ); - public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data); public record NodeCommandStopIfFree(); @@ -915,27 +892,6 @@ public record SecretAddress(Uri Url) : ISecret { public record SecretData(ISecret Secret) { } -public record JobResult( - [PartitionKey][RowKey] Guid JobId, - string Project, - string Name, - double NewCrashingInput = 0, - double NoReproCrashingInput = 0, - double NewReport = 0, - double NewUniqueReport = 0, - double NewRegressionReport = 0, - double NewCrashDump = 0, - double InstructionsCovered = 0, - double TotalInstructions = 0, - double CoverageRate = 0, - double IterationCount = 0 -) : EntityBase() { - public JobResult(Guid JobId, string Project, string Name) : this( - JobId: JobId, - Project: Project, - Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { } -} - public record JobConfig( string Project, string Name, @@ -1100,7 +1056,6 @@ public record TaskUnitConfig( string? InstanceTelemetryKey, string? MicrosoftTelemetryKey, Uri HeartbeatQueue, - Uri JobResultQueue, Dictionary Tags ) { public Uri? inputQueue { get; set; } diff --git a/src/ApiService/ApiService/Program.cs b/src/ApiService/ApiService/Program.cs index d5ee30b45e..f425c00809 100644 --- a/src/ApiService/ApiService/Program.cs +++ b/src/ApiService/ApiService/Program.cs @@ -118,7 +118,6 @@ public class LoggingMiddleware : IFunctionsWorkerMiddleware { .AddScoped() .AddScoped() .AddScoped() - .AddScoped() .AddScoped() .AddScoped() .AddScoped() diff --git a/src/ApiService/ApiService/onefuzzlib/Config.cs b/src/ApiService/ApiService/onefuzzlib/Config.cs index 872cedbc01..71af317348 100644 --- a/src/ApiService/ApiService/onefuzzlib/Config.cs +++ b/src/ApiService/ApiService/onefuzzlib/Config.cs @@ -71,7 +71,6 @@ public class Config : IConfig { InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey, MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry, HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ?? 
throw new Exception("unable to get heartbeat queue sas"), - JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"), Tags: task.Config.Tags ?? new Dictionary() ); diff --git a/src/ApiService/ApiService/onefuzzlib/Extension.cs b/src/ApiService/ApiService/onefuzzlib/Extension.cs index fbf62dd343..7995026eca 100644 --- a/src/ApiService/ApiService/onefuzzlib/Extension.cs +++ b/src/ApiService/ApiService/onefuzzlib/Extension.cs @@ -36,9 +36,7 @@ public class Extensions : IExtensions { var extensions = new List(); var instanceConfig = await _context.ConfigOperations.Fetch(); - if (vmOs == Os.Windows) { - extensions.Add(await MonitorExtension(region)); - } + extensions.Add(await MonitorExtension(region, vmOs)); var depenency = DependencyExtension(region, vmOs); if (depenency is not null) { @@ -331,21 +329,37 @@ private sealed class Settings { throw new NotSupportedException($"unsupported OS: {vmOs}"); } - public async Async.Task MonitorExtension(AzureLocation region) { + public async Async.Task MonitorExtension(AzureLocation region, Os vmOs) { var settings = await _context.LogAnalytics.GetMonitorSettings(); var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions); var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions); - return new VMExtensionWrapper { - Location = region, - Name = "OMSExtension", - TypePropertiesType = "MicrosoftMonitoringAgent", - Publisher = "Microsoft.EnterpriseCloud.Monitoring", - TypeHandlerVersion = "1.0", - AutoUpgradeMinorVersion = true, - Settings = new BinaryData(extensionSettings), - ProtectedSettings = new BinaryData(protectedExtensionSettings), - EnableAutomaticUpgrade = false - }; + if (vmOs == Os.Windows) { + return new VMExtensionWrapper { + Location = region, + Name = "OMSExtension", + TypePropertiesType = "MicrosoftMonitoringAgent", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; + } else if (vmOs == Os.Linux) { + return new VMExtensionWrapper { + Location = region, + Name = "OmsAgentForLinux", + TypePropertiesType = "OmsAgentForLinux", + Publisher = "Microsoft.EnterpriseCloud.Monitoring", + TypeHandlerVersion = "1.0", + AutoUpgradeMinorVersion = true, + Settings = new BinaryData(extensionSettings), + ProtectedSettings = new BinaryData(protectedExtensionSettings), + EnableAutomaticUpgrade = false + }; + } else { + throw new NotSupportedException($"unsupported os: {vmOs}"); + } } diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs deleted file mode 100644 index 1166cf91d4..0000000000 --- a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs +++ /dev/null @@ -1,121 +0,0 @@ -using ApiService.OneFuzzLib.Orm; -using Microsoft.Extensions.Logging; -using Polly; -namespace Microsoft.OneFuzz.Service; - -public interface IJobResultOperations : IOrm { - - Async.Task GetJobResult(Guid jobId); - Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue); - -} -public class JobResultOperations : Orm, IJobResultOperations { - - public JobResultOperations(ILogger log, IOnefuzzContext context) - : base(log, context) { 
- } - - public async Async.Task GetJobResult(Guid jobId) { - return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync(); - } - - private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary resultValue) { - - var newResult = result; - double newValue; - switch (type) { - case JobResultType.NewCrashingInput: - newValue = result.NewCrashingInput + resultValue["count"]; - newResult = result with { NewCrashingInput = newValue }; - break; - case JobResultType.NewReport: - newValue = result.NewReport + resultValue["count"]; - newResult = result with { NewReport = newValue }; - break; - case JobResultType.NewUniqueReport: - newValue = result.NewUniqueReport + resultValue["count"]; - newResult = result with { NewUniqueReport = newValue }; - break; - case JobResultType.NewRegressionReport: - newValue = result.NewRegressionReport + resultValue["count"]; - newResult = result with { NewRegressionReport = newValue }; - break; - case JobResultType.NewCrashDump: - newValue = result.NewCrashDump + resultValue["count"]; - newResult = result with { NewCrashDump = newValue }; - break; - case JobResultType.CoverageData: - double newCovered = resultValue["covered"]; - double newTotalCovered = resultValue["features"]; - double newCoverageRate = resultValue["rate"]; - newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate }; - break; - case JobResultType.RuntimeStats: - double newTotalIterations = resultValue["total_count"]; - newResult = result with { IterationCount = newTotalIterations }; - break; - default: - _logTracer.LogWarning($"Invalid Field {type}."); - break; - } - _logTracer.LogInformation($"Attempting to log new result: {newResult}"); - return newResult; - } - - private async Async.Task TryUpdate(Job job, JobResultType resultType, Dictionary resultValue) { - var jobId = job.JobId; - - var jobResult = await GetJobResult(jobId); - - if (jobResult == null) { - _logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId); - - var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name); - - jobResult = UpdateResult(entry, resultType, resultValue); - - var r = await Insert(jobResult); - if (!r.IsOk) { - throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); - } - _logTracer.LogInformation("created job result {JobId}", jobResult.JobId); - } else { - _logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId); - - jobResult = UpdateResult(jobResult, resultType, resultValue); - - var r = await Update(jobResult); - if (!r.IsOk) { - throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}"); - } - _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId); - } - - return true; - } - - public async Async.Task CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary resultValue) { - - var job = await _context.JobOperations.Get(jobId); - if (job == null) { - return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job"); - } - - var success = false; - try { - _logTracer.LogInformation("attempt to update job result {JobId}", job.JobId); - var policy = Policy.Handle().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5)); - await policy.ExecuteAsync(async () => { - success = await TryUpdate(job, resultType, resultValue); - _logTracer.LogInformation("attempt {success}", success); - }); - return OneFuzzResultVoid.Ok; - } catch (Exception e) { - 
return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] { - $"Unexpected failure when attempting to update job result for {job.JobId}", - $"Exception: {e}" - }); - } - } -} - diff --git a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs index 03c6322663..d877bfddbb 100644 --- a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs +++ b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs @@ -19,7 +19,6 @@ public interface IOnefuzzContext { IExtensions Extensions { get; } IIpOperations IpOperations { get; } IJobOperations JobOperations { get; } - IJobResultOperations JobResultOperations { get; } ILogAnalytics LogAnalytics { get; } INodeMessageOperations NodeMessageOperations { get; } INodeOperations NodeOperations { get; } @@ -84,7 +83,6 @@ public class OnefuzzContext : IOnefuzzContext { public IVmOperations VmOperations => _serviceProvider.GetRequiredService(); public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService(); public IJobOperations JobOperations => _serviceProvider.GetRequiredService(); - public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService(); public IScheduler Scheduler => _serviceProvider.GetRequiredService(); public IConfig Config => _serviceProvider.GetRequiredService(); public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService(); diff --git a/src/ApiService/IntegrationTests/Fakes/TestContext.cs b/src/ApiService/IntegrationTests/Fakes/TestContext.cs index 66d121e746..c46ff5fce7 100644 --- a/src/ApiService/IntegrationTests/Fakes/TestContext.cs +++ b/src/ApiService/IntegrationTests/Fakes/TestContext.cs @@ -32,7 +32,6 @@ public sealed class TestContext : IOnefuzzContext { TaskOperations = new TaskOperations(provider.CreateLogger(), Cache, this); NodeOperations = new NodeOperations(provider.CreateLogger(), this); JobOperations = new JobOperations(provider.CreateLogger(), this); - JobResultOperations = new JobResultOperations(provider.CreateLogger(), this); NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger(), this); TaskEventOperations = new TaskEventOperations(provider.CreateLogger(), this); NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger(), this); @@ -58,7 +57,6 @@ public Async.Task InsertAll(params EntityBase[] objs) Node n => NodeOperations.Insert(n), Pool p => PoolOperations.Insert(p), Job j => JobOperations.Insert(j), - JobResult jr => JobResultOperations.Insert(jr), Repro r => ReproOperations.Insert(r), Scaleset ss => ScalesetOperations.Insert(ss), NodeTasks nt => NodeTasksOperations.Insert(nt), @@ -86,7 +84,6 @@ public Async.Task InsertAll(params EntityBase[] objs) public ITaskOperations TaskOperations { get; } public IJobOperations JobOperations { get; } - public IJobResultOperations JobResultOperations { get; } public INodeOperations NodeOperations { get; } public INodeTasksOperations NodeTasksOperations { get; } public ITaskEventOperations TaskEventOperations { get; } diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index 254684be97..a1d86e7d25 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -2123,7 +2123,6 @@ dependencies = [ "log", "nix", "notify", - "onefuzz-result", "onefuzz-telemetry", "pete", "pretty_assertions", @@ -2198,20 +2197,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "onefuzz-result" -version = "0.2.0" -dependencies = [ - "anyhow", - "async-trait", - "log", - "onefuzz-telemetry", - "reqwest", - "serde", - "storage-queue", 
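// Illustrative sketch, not part of this patch: how the internally-tagged
// enum in the deleted onefuzz-result crate (this Cargo.lock entry; its
// source is removed in job_result.rs below) serialized onto the
// "job-result" queue that the service-side QueueJobResult function read.
// serde_json is assumed here purely for the demo; the deleted Cargo.toml
// lists serde and uuid, not serde_json.
use serde::Serialize;

#[derive(Serialize)]
#[serde(tag = "type")]
enum JobResultData {
    NewCrashingInput,
    CoverageData,
}

fn main() {
    // Unit variants under `tag = "type"` encode as {"type":"<variant>"},
    // mirroring the C# record `JobResultData(JobResultType Type)` that
    // this patch removes from Model.cs.
    let msg = serde_json::to_string(&JobResultData::CoverageData).unwrap();
    assert_eq!(msg, r#"{"type":"CoverageData"}"#);
    println!("{msg}");
}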
- "uuid", -] - [[package]] name = "onefuzz-task" version = "0.2.0" @@ -2241,7 +2226,6 @@ dependencies = [ "num_cpus", "onefuzz", "onefuzz-file-format", - "onefuzz-result", "onefuzz-telemetry", "path-absolutize", "pretty_assertions", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index ce01ae880c..2f4cea41a4 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,7 +10,6 @@ members = [ "onefuzz", "onefuzz-task", "onefuzz-agent", - "onefuzz-result", "onefuzz-file-format", "onefuzz-telemetry", "reqwest-retry", diff --git a/src/agent/onefuzz-agent/src/config.rs b/src/agent/onefuzz-agent/src/config.rs index fc623e72af..87edfb2c1b 100644 --- a/src/agent/onefuzz-agent/src/config.rs +++ b/src/agent/onefuzz-agent/src/config.rs @@ -34,8 +34,6 @@ pub struct StaticConfig { pub heartbeat_queue: Option, - pub job_result_queue: Option, - pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -73,8 +71,6 @@ struct RawStaticConfig { pub heartbeat_queue: Option, - pub job_result_queue: Option, - pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -121,7 +117,6 @@ impl StaticConfig { microsoft_telemetry_key: config.microsoft_telemetry_key, instance_telemetry_key: config.instance_telemetry_key, heartbeat_queue: config.heartbeat_queue, - job_result_queue: config.job_result_queue, instance_id: config.instance_id, managed: config.managed, machine_identity, @@ -157,12 +152,6 @@ impl StaticConfig { None }; - let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") { - Some(Url::parse(&key)?) - } else { - None - }; - let instance_telemetry_key = if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") { Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?)) @@ -194,7 +183,6 @@ impl StaticConfig { instance_telemetry_key, microsoft_telemetry_key, heartbeat_queue, - job_result_queue, instance_id, managed: !is_unmanaged, machine_identity, diff --git a/src/agent/onefuzz-agent/src/log_uploader.rs b/src/agent/onefuzz-agent/src/log_uploader.rs index d424013421..6bccc0bef2 100644 --- a/src/agent/onefuzz-agent/src/log_uploader.rs +++ b/src/agent/onefuzz-agent/src/log_uploader.rs @@ -210,3 +210,32 @@ async fn sync_file( blob_client.append_block(Body::from(f)).await?; Ok(len) } + +#[cfg(test)] +mod tests { + use std::io::Seek; + + use anyhow::Result; + use tokio::io::{AsyncReadExt, AsyncSeekExt}; + + #[allow(clippy::unused_io_amount)] + #[tokio::test] + #[ignore] + + async fn test_seek_behavior() -> Result<()> { + let path = "C:\\temp\\test.ps1"; + let mut std_file = std::fs::File::open(path)?; + std_file.seek(std::io::SeekFrom::Start(3))?; + + let mut tokio_file = tokio::fs::File::from_std(std_file); + + let buf = &mut [0u8; 5]; + tokio_file.read(buf).await?; + println!("******** buf {:?}", buf); + tokio_file.seek(std::io::SeekFrom::Start(0)).await?; + tokio_file.read(buf).await?; + println!("******** buf {:?}", buf); + + Ok(()) + } +} diff --git a/src/agent/onefuzz-agent/src/work.rs b/src/agent/onefuzz-agent/src/work.rs index d0222744a7..b55d1d86a1 100644 --- a/src/agent/onefuzz-agent/src/work.rs +++ b/src/agent/onefuzz-agent/src/work.rs @@ -91,10 +91,7 @@ impl WorkSet { pub fn setup_dir(&self) -> Result { let root = self.get_root_folder()?; - // Putting the setup container at the root for backward compatibility. 
- // The path of setup folder can be used as part of the deduplication logic in the bug filing service - let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?; - self.setup_url.as_path(setup_root) + self.setup_url.as_path(root) } pub fn extra_setup_dir(&self) -> Result> { diff --git a/src/agent/onefuzz-result/Cargo.toml b/src/agent/onefuzz-result/Cargo.toml deleted file mode 100644 index 7c7de6615c..0000000000 --- a/src/agent/onefuzz-result/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "onefuzz-result" -version = "0.2.0" -authors = ["fuzzing@microsoft.com"] -edition = "2021" -publish = false -license = "MIT" - -[dependencies] -anyhow = { version = "1.0", features = ["backtrace"] } -async-trait = "0.1" -reqwest = "0.11" -serde = "1.0" -storage-queue = { path = "../storage-queue" } -uuid = { version = "1.4", features = ["serde", "v4"] } -onefuzz-telemetry = { path = "../onefuzz-telemetry" } -log = "0.4" - diff --git a/src/agent/onefuzz-result/src/job_result.rs b/src/agent/onefuzz-result/src/job_result.rs deleted file mode 100644 index b305eca2cb..0000000000 --- a/src/agent/onefuzz-result/src/job_result.rs +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -use anyhow::Result; -use async_trait::async_trait; -use onefuzz_telemetry::warn; -use reqwest::Url; -use serde::{self, Deserialize, Serialize}; -use std::collections::HashMap; -use std::sync::Arc; -use storage_queue::QueueClient; -use uuid::Uuid; - -#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] -#[serde(tag = "type")] -pub enum JobResultData { - NewCrashingInput, - NoReproCrashingInput, - NewReport, - NewUniqueReport, - NewRegressionReport, - NewCoverage, - NewCrashDump, - CoverageData, - RuntimeStats, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -struct JobResult { - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, - data: JobResultData, - value: HashMap, -} - -#[derive(Clone)] -pub struct TaskContext { - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, -} - -pub struct JobResultContext { - pub state: TaskContext, - pub queue_client: QueueClient, -} - -pub struct JobResultClient { - pub context: Arc>, -} - -impl JobResultClient { - pub fn init_job_result( - context: TaskContext, - queue_url: Url, - ) -> Result> - where - TaskContext: Send + Sync + 'static, - { - let context = Arc::new(JobResultContext { - state: context, - queue_client: QueueClient::new(queue_url)?, - }); - - Ok(JobResultClient { context }) - } -} - -pub type TaskJobResultClient = JobResultClient; - -pub async fn init_job_result( - queue_url: Url, - task_id: Uuid, - job_id: Uuid, - machine_id: Uuid, - machine_name: String, -) -> Result { - let hb = JobResultClient::init_job_result( - TaskContext { - task_id, - job_id, - machine_id, - machine_name, - }, - queue_url, - )?; - Ok(hb) -} - -#[async_trait] -pub trait JobResultSender { - async fn send_direct(&self, data: JobResultData, value: HashMap); -} - -#[async_trait] -impl JobResultSender for TaskJobResultClient { - async fn send_direct(&self, data: JobResultData, value: HashMap) { - let task_id = self.context.state.task_id; - let job_id = self.context.state.job_id; - let machine_id = self.context.state.machine_id; - let machine_name = self.context.state.machine_name.clone(); - - let _ = self - .context - .queue_client - .enqueue(JobResult { - task_id, - job_id, - machine_id, - machine_name, - data, - value, - }) - .await; - } -} - -#[async_trait] 
-impl JobResultSender for Option { - async fn send_direct(&self, data: JobResultData, value: HashMap) { - match self { - Some(client) => client.send_direct(data, value).await, - None => warn!("Failed to send Job Result message data from agent."), - } - } -} diff --git a/src/agent/onefuzz-result/src/lib.rs b/src/agent/onefuzz-result/src/lib.rs deleted file mode 100644 index dae666ca9a..0000000000 --- a/src/agent/onefuzz-result/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -pub mod job_result; diff --git a/src/agent/onefuzz-task/Cargo.toml b/src/agent/onefuzz-task/Cargo.toml index 4e0bd381b0..0ad2f9aa4f 100644 --- a/src/agent/onefuzz-task/Cargo.toml +++ b/src/agent/onefuzz-task/Cargo.toml @@ -39,7 +39,6 @@ serde_json = "1.0" serde_yaml = "0.9.21" onefuzz = { path = "../onefuzz" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } -onefuzz-result = { path = "../onefuzz-result" } path-absolutize = "3.1" reqwest-retry = { path = "../reqwest-retry" } strum = "0.25" diff --git a/src/agent/onefuzz-task/src/local/cmd.rs b/src/agent/onefuzz-task/src/local/cmd.rs index eabefb71ee..80fd51a96b 100644 --- a/src/agent/onefuzz-task/src/local/cmd.rs +++ b/src/agent/onefuzz-task/src/local/cmd.rs @@ -3,7 +3,11 @@ #[cfg(any(target_os = "linux", target_os = "windows"))] use crate::local::coverage; -use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi}; +use crate::local::{ + common::add_common_config, generic_analysis, generic_crash_report, generic_generator, + libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression, + libfuzzer_test_input, radamsa, test_input, tui::TerminalUi, +}; use anyhow::{Context, Result}; use clap::{Arg, ArgAction, Command}; use std::time::Duration; @@ -17,9 +21,19 @@ use super::template; #[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)] #[strum(serialize_all = "kebab-case")] enum Commands { + Radamsa, #[cfg(any(target_os = "linux", target_os = "windows"))] Coverage, LibfuzzerFuzz, + LibfuzzerMerge, + LibfuzzerCrashReport, + LibfuzzerTestInput, + LibfuzzerRegression, + Libfuzzer, + CrashReport, + Generator, + Analysis, + TestInput, Template, } @@ -54,7 +68,23 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> { match command { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::run(&sub_args, event_sender).await, + Commands::Radamsa => radamsa::run(&sub_args, event_sender).await, + Commands::LibfuzzerCrashReport => { + libfuzzer_crash_report::run(&sub_args, event_sender).await + } Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await, + Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, event_sender).await, + Commands::LibfuzzerTestInput => { + libfuzzer_test_input::run(&sub_args, event_sender).await + } + Commands::LibfuzzerRegression => { + libfuzzer_regression::run(&sub_args, event_sender).await + } + Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await, + Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await, + Commands::Generator => generic_generator::run(&sub_args, event_sender).await, + Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await, + Commands::TestInput => test_input::run(&sub_args, event_sender).await, Commands::Template => { let config = sub_args .get_one::("config") @@ -110,7 +140,17 @@ pub fn args(name: &'static str) -> Command { let app = match subcommand { #[cfg(any(target_os 
= "linux", target_os = "windows"))] Commands::Coverage => coverage::args(subcommand.into()), + Commands::Radamsa => radamsa::args(subcommand.into()), + Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()), Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()), + Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()), + Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()), + Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()), + Commands::Libfuzzer => libfuzzer::args(subcommand.into()), + Commands::CrashReport => generic_crash_report::args(subcommand.into()), + Commands::Generator => generic_generator::args(subcommand.into()), + Commands::Analysis => generic_analysis::args(subcommand.into()), + Commands::TestInput => test_input::args(subcommand.into()), Commands::Template => Command::new("template") .about("uses the template to generate a run") .args(vec![Arg::new("config") diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index 17940d799f..f8d7949e80 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -26,10 +26,20 @@ pub const INPUTS_DIR: &str = "inputs_dir"; pub const CRASHES_DIR: &str = "crashes_dir"; pub const CRASHDUMPS_DIR: &str = "crashdumps_dir"; pub const TARGET_WORKERS: &str = "target_workers"; +pub const REPORTS_DIR: &str = "reports_dir"; +pub const NO_REPRO_DIR: &str = "no_repro_dir"; pub const TARGET_TIMEOUT: &str = "target_timeout"; +pub const CHECK_RETRY_COUNT: &str = "check_retry_count"; +pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue"; +pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir"; pub const COVERAGE_DIR: &str = "coverage_dir"; pub const READONLY_INPUTS: &str = "readonly_inputs_dir"; +pub const CHECK_ASAN_LOG: &str = "check_asan_log"; +pub const TOOLS_DIR: &str = "tools_dir"; +pub const RENAME_OUTPUT: &str = "rename_output"; pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help"; +pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger"; +pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir"; pub const TARGET_EXE: &str = "target_exe"; pub const TARGET_ENV: &str = "target_env"; @@ -37,6 +47,17 @@ pub const TARGET_OPTIONS: &str = "target_options"; // pub const SUPERVISOR_EXE: &str = "supervisor_exe"; // pub const SUPERVISOR_ENV: &str = "supervisor_env"; // pub const SUPERVISOR_OPTIONS: &str = "supervisor_options"; +pub const GENERATOR_EXE: &str = "generator_exe"; +pub const GENERATOR_ENV: &str = "generator_env"; +pub const GENERATOR_OPTIONS: &str = "generator_options"; + +pub const ANALYZER_EXE: &str = "analyzer_exe"; +pub const ANALYZER_OPTIONS: &str = "analyzer_options"; +pub const ANALYZER_ENV: &str = "analyzer_env"; +pub const ANALYSIS_DIR: &str = "analysis_dir"; +pub const ANALYSIS_INPUTS: &str = "analysis_inputs"; +pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs"; +pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs"; pub const CREATE_JOB_DIR: &str = "create_job_dir"; @@ -45,6 +66,7 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1); pub enum CmdType { Target, + Generator, // Supervisor, } @@ -68,6 +90,7 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result let name = match cmd_type { CmdType::Target => TARGET_EXE, // CmdType::Supervisor => SUPERVISOR_EXE, + CmdType::Generator => GENERATOR_EXE, }; args.get_one::(name) @@ -79,6 +102,7 @@ pub fn 
get_cmd_arg(cmd_type: CmdType, args: &clap::ArgMatches) -> Vec { let name = match cmd_type { CmdType::Target => TARGET_OPTIONS, // CmdType::Supervisor => SUPERVISOR_OPTIONS, + CmdType::Generator => GENERATOR_OPTIONS, }; args.get_many::(name) @@ -91,6 +115,7 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result TARGET_ENV, // CmdType::Supervisor => SUPERVISOR_ENV, + CmdType::Generator => GENERATOR_ENV, }; get_hash_map(args, env_name) } @@ -240,7 +265,6 @@ pub async fn build_local_context( }, instance_telemetry_key: None, heartbeat_queue: None, - job_result_queue: None, microsoft_telemetry_key: None, logs: None, min_available_memory_mb: 0, diff --git a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index aba02c7991..7210893809 100644 --- a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -5,31 +5,28 @@ # 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh -required_args: &required_args - target_exe: "REPLACE_ME" # The path to your target - inputs: &inputs "REPLACE_ME" # A folder containining your inputs - crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output - crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output - coverage: "REPLACE_ME" # The folder where you want the code coverage to be output - regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output - target_args: &target_args - <<: *required_args target_env: {} + target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" target_options: [] +inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds" + tasks: - type: LibFuzzer <<: *target_args + inputs: *inputs + crashes: &crash "./crashes" readonly_inputs: [] check_fuzzer_help: true - - type: LibfuzzerRegression + - type: "Report" <<: *target_args - - - type: "LibfuzzerCrashReport" - <<: *target_args - input_queue: *crashes + input_queue: *crash + crashes: *crash + reports: "./reports" + unique_reports: "./unique_reports" + no_repro: "./no_repro" check_fuzzer_help: true - type: "Coverage" @@ -38,11 +35,4 @@ tasks: - "{input}" input_queue: *inputs readonly_inputs: [*inputs] - - # The analysis task is optional in the libfuzzer_basic template - # - type: Analysis - # <<: *target_args - # analysis: "REPLACE_ME" # The folder where you want the analysis results to be output - # analyzer_exe: "REPLACE_ME" - # analyzer_options: [] - # analyzer_env: {} + coverage: "./coverage" diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index 429e7b0e3b..3d3e2fafc8 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -3,13 +3,139 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::config::CommonConfig; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS, + CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR, + UNIQUE_REPORTS_DIR, + }, + tasks::{ + analysis::generic::{run as run_analysis, Config}, + config::CommonConfig, + }, +}; use anyhow::Result; use async_trait::async_trait; +use 
clap::{Arg, Command}; +use flume::Sender; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; +pub fn build_analysis_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_options = get_cmd_arg(CmdType::Target, args); + + let analyzer_exe = args + .get_one::(ANALYZER_EXE) + .cloned() + .ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?; + + let analyzer_options = args + .get_many::(ANALYZER_OPTIONS) + .unwrap_or_default() + .map(|x| x.to_string()) + .collect(); + + let analyzer_env = get_hash_map(args, ANALYZER_ENV)?; + let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)? + .monitor_count(&event_sender)?; + let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?; + let crashes = if input_queue.is_none() { + get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)? + } else { + None + }; + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let config = Config { + analyzer_exe, + analyzer_options, + analyzer_env, + target_exe, + target_options, + input_queue, + crashes, + analysis, + tools: Some(tools), + reports, + unique_reports, + no_repro, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?; + run_analysis(config).await +} + +pub fn build_shared_args(required_task: bool) -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV) + .long(TARGET_ENV) + .requires(TARGET_EXE) + .num_args(0..), + Arg::new(TARGET_OPTIONS) + .long(TARGET_OPTIONS) + .default_value("{input}") + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(ANALYZER_OPTIONS) + .long(ANALYZER_OPTIONS) + .requires(ANALYZER_EXE) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(ANALYZER_ENV) + .long(ANALYZER_ENV) + .requires(ANALYZER_EXE) + .num_args(0..), + Arg::new(TOOLS_DIR) + .long(TOOLS_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(ANALYZER_EXE) + .long(ANALYZER_EXE) + .requires(ANALYSIS_DIR) + .requires(CRASHES_DIR) + .required(required_task), + Arg::new(ANALYSIS_DIR) + .long(ANALYSIS_DIR) + .requires(ANALYZER_EXE) + .requires(CRASHES_DIR) + .required(required_task), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generic analysis") + .args(&build_shared_args(true)) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Analysis { analyzer_exe: String, @@ -20,7 +146,7 @@ pub struct Analysis { input_queue: Option, crashes: Option, analysis: PathBuf, - tools: Option, + tools: PathBuf, reports: Option, unique_reports: Option, 
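// Illustrative sketch, not part of this patch: the clap 4 patterns the
// local-task arg builders above rely on. `requires` enforces that
// --analyzer_exe only makes sense together with --analysis_dir, and
// `value_delimiter(' ')` lets one quoted argument carry a whole option
// list. Names and values here are hypothetical.
use clap::{value_parser, Arg, Command};
use std::path::PathBuf;

fn main() {
    let cmd = Command::new("demo")
        .arg(
            Arg::new("analyzer_exe")
                .long("analyzer_exe")
                .requires("analysis_dir"),
        )
        .arg(
            Arg::new("analysis_dir")
                .long("analysis_dir")
                .value_parser(value_parser!(PathBuf)),
        )
        .arg(
            Arg::new("analyzer_options")
                .long("analyzer_options")
                .value_delimiter(' ')
                // Needed because the option values themselves begin with '-'.
                .allow_hyphen_values(true),
        );

    let m = cmd.get_matches_from([
        "demo",
        "--analyzer_exe", "triage",
        "--analysis_dir", "./analysis",
        "--analyzer_options", "-v --depth 3",
    ]);

    let opts: Vec<&str> = m
        .get_many::<String>("analyzer_options")
        .unwrap()
        .map(String::as_str)
        .collect();
    // One quoted string arrives as three separate option values.
    assert_eq!(opts, ["-v", "--depth", "3"]);
}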
no_repro: Option, @@ -49,10 +175,9 @@ impl Template for Analysis { .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, - tools: self - .tools - .as_ref() - .and_then(|path| context.to_monitored_sync_dir("tools", path).ok()), + tools: context + .to_monitored_sync_dir("tools", self.tools.clone()) + .ok(), reports: self .reports diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index 347a8cac76..6b0e2fccad 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -3,14 +3,150 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, + SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR, + DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, + TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, + }, + tasks::{ + config::CommonConfig, + report::generic::{Config, ReportTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; +use storage_queue::QueueClient; use super::template::{RunContext, Template}; +pub fn build_report_config( + args: &clap::ArgMatches, + input_queue: Option, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_env = get_cmd_env(CmdType::Target, args)?; + let target_options = get_cmd_arg(CmdType::Target, args); + + let crashes = Some(get_synced_dir( + CRASHES_DIR, + common.job_id, + common.task_id, + args, + )?) + .monitor_count(&event_sender)?; + let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let unique_reports = Some(get_synced_dir( + UNIQUE_REPORTS_DIR, + common.job_id, + common.task_id, + args, + )?) 
+ .monitor_count(&event_sender)?; + + let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); + + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default"); + + let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); + let check_asan_log = args.get_flag(CHECK_ASAN_LOG); + let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); + + let config = Config { + target_exe, + target_env, + target_options, + target_timeout, + check_asan_log, + check_debugger, + check_retry_count, + check_queue, + crashes, + minimized_stack_depth: None, + input_queue, + no_repro, + reports, + unique_reports, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; + ReportTask::new(config).managed_run().await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .long(CRASHES_DIR) + .required(true) + .value_parser(value_parser!(PathBuf)), + Arg::new(REPORTS_DIR) + .long(REPORTS_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(NO_REPRO_DIR) + .long(NO_REPRO_DIR) + .required(false) + .value_parser(value_parser!(PathBuf)), + Arg::new(UNIQUE_REPORTS_DIR) + .long(UNIQUE_REPORTS_DIR) + .value_parser(value_parser!(PathBuf)) + .required(true), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)) + .default_value("30"), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(DISABLE_CHECK_QUEUE) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_QUEUE), + Arg::new(CHECK_ASAN_LOG) + .action(ArgAction::SetTrue) + .long(CHECK_ASAN_LOG), + Arg::new(DISABLE_CHECK_DEBUGGER) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_DEBUGGER), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generic crash report") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct CrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index ae9f6a3cc6..823ba221d6 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -3,14 +3,154 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ + build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, + get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, + CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS, + READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, + TOOLS_DIR, + }, + tasks::{ + config::CommonConfig, + fuzz::generator::{Config, GeneratorTask}, + utils::default_bool_true, + }, +}; use anyhow::Result; use async_trait::async_trait; +use clap::{Arg, ArgAction, Command}; +use 
flume::Sender; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; use super::template::{RunContext, Template}; +pub fn build_fuzz_config( + args: &clap::ArgMatches, + common: CommonConfig, + event_sender: Option>, +) -> Result { + let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? + .monitor_count(&event_sender)?; + let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); + let target_options = get_cmd_arg(CmdType::Target, args); + let target_env = get_cmd_env(CmdType::Target, args)?; + + let generator_exe = get_cmd_exe(CmdType::Generator, args)?; + let generator_options = get_cmd_arg(CmdType::Generator, args); + let generator_env = get_cmd_env(CmdType::Generator, args)?; + let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)? + .into_iter() + .map(|sd| sd.monitor_count(&event_sender)) + .collect::>>()?; + + let rename_output = args.get_flag(RENAME_OUTPUT); + let check_asan_log = args.get_flag(CHECK_ASAN_LOG); + let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); + + let check_retry_count = args + .get_one::(CHECK_RETRY_COUNT) + .copied() + .expect("has a default"); + + let target_timeout = Some( + args.get_one::(TARGET_TIMEOUT) + .copied() + .expect("has a default"), + ); + + let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args) + .ok() + .monitor_count(&event_sender)?; + + let ensemble_sync_delay = None; + + let config = Config { + generator_exe, + generator_env, + generator_options, + readonly_inputs, + crashes, + tools, + target_exe, + target_env, + target_options, + target_timeout, + check_asan_log, + check_debugger, + check_retry_count, + rename_output, + ensemble_sync_delay, + common, + }; + + Ok(config) +} + +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?; + GeneratorTask::new(config).run().await +} + +pub fn build_shared_args() -> Vec { + vec![ + Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), + Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), + Arg::new(TARGET_OPTIONS) + .default_value("{input}") + .long(TARGET_OPTIONS) + .value_delimiter(' ') + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(GENERATOR_EXE) + .long(GENERATOR_EXE) + .default_value("radamsa") + .required(true), + Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..), + Arg::new(GENERATOR_OPTIONS) + .long(GENERATOR_OPTIONS) + .value_delimiter(' ') + .default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}") + .help("Use a quoted string with space separation to denote multiple arguments"), + Arg::new(CRASHES_DIR) + .required(true) + .long(CRASHES_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(READONLY_INPUTS) + .required(true) + .num_args(1..) 
+ .value_parser(value_parser!(PathBuf)) + .long(READONLY_INPUTS), + Arg::new(TOOLS_DIR) + .long(TOOLS_DIR) + .value_parser(value_parser!(PathBuf)), + Arg::new(CHECK_RETRY_COUNT) + .long(CHECK_RETRY_COUNT) + .value_parser(value_parser!(u64)) + .default_value("0"), + Arg::new(CHECK_ASAN_LOG) + .action(ArgAction::SetTrue) + .long(CHECK_ASAN_LOG), + Arg::new(RENAME_OUTPUT) + .action(ArgAction::SetTrue) + .long(RENAME_OUTPUT), + Arg::new(TARGET_TIMEOUT) + .long(TARGET_TIMEOUT) + .value_parser(value_parser!(u64)) + .default_value("30"), + Arg::new(DISABLE_CHECK_DEBUGGER) + .action(ArgAction::SetTrue) + .long(DISABLE_CHECK_DEBUGGER), + ] +} + +pub fn args(name: &'static str) -> Command { + Command::new(name) + .about("execute a local-only generator fuzzing task") + .args(&build_shared_args()) +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Generator { generator_exe: String, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index 433636be1c..56dff7dbe3 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -1,19 +1,168 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::tasks::{ - config::CommonConfig, - fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - utils::default_bool_true, +#[cfg(any(target_os = "linux", target_os = "windows"))] +use crate::{ + local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args}, + tasks::coverage::generic::CoverageTask, +}; +use crate::{ + local::{ + common::{ + build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE, + REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR, + }, + generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args}, + libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args}, + libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args}, + libfuzzer_regression::{ + build_regression_config, build_shared_args as build_regression_args, + }, + }, + tasks::{ + analysis::generic::run as run_analysis, + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + regression::libfuzzer::LibFuzzerRegressionTask, + report::libfuzzer_report::ReportTask, + utils::default_bool_true, + }, }; use anyhow::Result; use async_trait::async_trait; -use onefuzz::syncdir::SyncedDir; +use clap::Command; +use flume::Sender; +use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; use schemars::JsonSchema; -use std::{collections::HashMap, path::PathBuf}; +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, +}; +use tokio::task::spawn; +use uuid::Uuid; use super::template::{RunContext, Template}; +pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { + let context = build_local_context(args, true, event_sender.clone()).await?; + let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; + let crash_dir = fuzz_config + .crashes + .remote_url()? 
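// Illustrative sketch, not part of this patch: the fan-out pattern the
// `local libfuzzer` command in this hunk uses — spawn each long-running
// task (fuzzer, report, coverage, analysis), keep the JoinHandles, then
// fail as a group if any one fails, which is the role
// try_wait_all_join_handles plays here. The worker bodies below are
// hypothetical stand-ins; tokio and anyhow come from the workspace.
use anyhow::Result;
use tokio::task::{spawn, JoinHandle};

async fn try_wait_all(handles: Vec<JoinHandle<Result<()>>>) -> Result<()> {
    // Await in order; `??` surfaces both panics (JoinError) and task errors.
    for handle in handles {
        handle.await??;
    }
    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    let mut task_handles: Vec<JoinHandle<Result<()>>> = vec![];
    for name in ["fuzz", "report", "coverage"] {
        task_handles.push(spawn(async move {
            println!("{name} task running");
            Ok(())
        }));
    }
    try_wait_all(task_handles).await
}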
+ .as_file_path() + .expect("invalid crash dir remote location"); + + let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?; + let mut task_handles = vec![]; + + let fuzz_task = spawn(async move { fuzzer.run().await }); + + wait_for_dir(&crash_dir).await?; + + task_handles.push(fuzz_task); + + if args.contains_id(UNIQUE_REPORTS_DIR) { + let crash_report_input_monitor = + DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; + + let report_config = build_report_config( + args, + Some(crash_report_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + + let mut report = ReportTask::new(report_config); + let report_task = spawn(async move { report.managed_run().await }); + + task_handles.push(report_task); + task_handles.push(crash_report_input_monitor.handle); + } + + #[cfg(any(target_os = "linux", target_os = "windows"))] + if args.contains_id(COVERAGE_DIR) { + let coverage_input_monitor = + DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; + let coverage_config = coverage::build_coverage_config( + args, + true, + Some(coverage_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + + let mut coverage = CoverageTask::new(coverage_config); + let coverage_task = spawn(async move { coverage.run().await }); + + task_handles.push(coverage_task); + task_handles.push(coverage_input_monitor.handle); + } + + if args.contains_id(ANALYZER_EXE) { + let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?; + let analysis_config = build_analysis_config( + args, + Some(analysis_input_monitor.queue_client), + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender.clone(), + )?; + let analysis_task = spawn(async move { run_analysis(analysis_config).await }); + + task_handles.push(analysis_task); + task_handles.push(analysis_input_monitor.handle); + } + + if args.contains_id(REGRESSION_REPORTS_DIR) { + let regression_config = build_regression_config( + args, + CommonConfig { + task_id: Uuid::new_v4(), + ..context.common_config.clone() + }, + event_sender, + )?; + let regression = LibFuzzerRegressionTask::new(regression_config); + let regression_task = spawn(async move { regression.run().await }); + task_handles.push(regression_task); + } + + try_wait_all_join_handles(task_handles).await?; + + Ok(()) +} + +pub fn args(name: &'static str) -> Command { + let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task"); + + let mut used = HashSet::new(); + + for args in &[ + build_fuzz_args(), + build_crash_args(), + build_analysis_args(false), + #[cfg(any(target_os = "linux", target_os = "windows"))] + build_coverage_args(true), + build_regression_args(false), + ] { + for arg in args { + if used.insert(arg.get_id()) { + app = app.arg(arg); + } + } + } + + app +} + #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibFuzzer { inputs: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index 04ba4f9225..c1ab283575 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -3,13 +3,139 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::tasks::{config::CommonConfig, utils::default_bool_true}; +use crate::{ + local::common::{ 
+        build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
+        SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR,
+        DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
+        TARGET_TIMEOUT, UNIQUE_REPORTS_DIR,
+    },
+    tasks::{
+        config::CommonConfig,
+        report::libfuzzer_report::{Config, ReportTask},
+        utils::default_bool_true,
+    },
+};
 use anyhow::Result;
 use async_trait::async_trait;
+use clap::{Arg, ArgAction, Command};
+use flume::Sender;
 use futures::future::OptionFuture;
 use schemars::JsonSchema;
+use storage_queue::QueueClient;
 
 use super::template::{RunContext, Template};
+
+pub fn build_report_config(
+    args: &clap::ArgMatches,
+    input_queue: Option<QueueClient>,
+    common: CommonConfig,
+    event_sender: Option<Sender<UiEvent>>,
+) -> Result<Config> {
+    let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
+    let target_env = get_cmd_env(CmdType::Target, args)?;
+    let target_options = get_cmd_arg(CmdType::Target, args);
+
+    let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+    let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+
+    let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+
+    let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+
+    let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
+
+    let check_retry_count = args
+        .get_one::<u64>(CHECK_RETRY_COUNT)
+        .copied()
+        .expect("has a default");
+
+    let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE);
+
+    let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
+
+    let crashes = if input_queue.is_none() { crashes } else { None };
+
+    let config = Config {
+        target_exe,
+        target_env,
+        target_options,
+        target_timeout,
+        check_retry_count,
+        check_fuzzer_help,
+        minimized_stack_depth: None,
+        input_queue,
+        check_queue,
+        crashes,
+        reports,
+        no_repro,
+        unique_reports,
+        common,
+    };
+
+    Ok(config)
+}
+
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, true, event_sender.clone()).await?;
+    let config = build_report_config(args, None, context.common_config.clone(), event_sender)?;
+    ReportTask::new(config).managed_run().await
+}
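+
+// Example invocation (hypothetical subcommand and path spellings; the long
+// flags are the string constants registered in `build_shared_args` below):
+//
+//     onefuzz-task local libfuzzer-crash-report \
+//         --target_exe ./fuzz.exe \
+//         --crashes_dir ./crashes \
+//         --unique_reports_dir ./unique_reports \
+//         --check_retry_count 2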
+
+pub fn build_shared_args() -> Vec<Arg> {
+    vec![
+        Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
+        Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
+        Arg::new(TARGET_OPTIONS)
+            .long(TARGET_OPTIONS)
+            .value_delimiter(' ')
+            .help("Use a quoted string with space separation to denote multiple arguments"),
+        Arg::new(CRASHES_DIR)
+            .long(CRASHES_DIR)
+            .required(true)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(REPORTS_DIR)
+            .long(REPORTS_DIR)
+            .required(false)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(NO_REPRO_DIR)
+            .long(NO_REPRO_DIR)
+            .required(false)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(UNIQUE_REPORTS_DIR)
+            .long(UNIQUE_REPORTS_DIR)
+            .required(true)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(TARGET_TIMEOUT)
+            .value_parser(value_parser!(u64))
+            .long(TARGET_TIMEOUT),
+        Arg::new(CHECK_RETRY_COUNT)
+            .long(CHECK_RETRY_COUNT)
+            .value_parser(value_parser!(u64))
+            .default_value("0"),
+        Arg::new(DISABLE_CHECK_QUEUE)
+            .action(ArgAction::SetTrue)
+            .long(DISABLE_CHECK_QUEUE),
+        Arg::new(CHECK_FUZZER_HELP)
+            .action(ArgAction::SetTrue)
+            .long(CHECK_FUZZER_HELP),
+    ]
+}
+
+pub fn args(name: &'static str) -> Command {
+    Command::new(name)
+        .about("execute a local-only libfuzzer crash report task")
+        .args(&build_shared_args())
+}
+
 #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
 pub struct LibfuzzerCrashReport {
     target_exe: PathBuf,
diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs
index 4b3e4ce58f..69c9df820b 100644
--- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs
+++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs
@@ -3,15 +3,97 @@
 use std::{collections::HashMap, path::PathBuf};
 
-use crate::tasks::{config::CommonConfig, utils::default_bool_true};
+use crate::{
+    local::common::{
+        build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir,
+        get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS,
+        ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS,
+        TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
+    },
+    tasks::{
+        config::CommonConfig,
+        merge::libfuzzer_merge::{spawn, Config},
+        utils::default_bool_true,
+    },
+};
 use anyhow::Result;
 use async_trait::async_trait;
+use clap::{Arg, ArgAction, Command};
+use flume::Sender;
 use futures::future::OptionFuture;
 use onefuzz::syncdir::SyncedDir;
 use schemars::JsonSchema;
+use storage_queue::QueueClient;
 
 use super::template::{RunContext, Template};
 
+pub fn build_merge_config(
+    args: &clap::ArgMatches,
+    input_queue: Option<QueueClient>,
+    common: CommonConfig,
+    event_sender: Option<Sender<UiEvent>>,
+) -> Result<Config> {
+    let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
+    let target_env = get_cmd_env(CmdType::Target, args)?;
+    let target_options = get_cmd_arg(CmdType::Target, args);
+    let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
+    let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)?
+        .into_iter()
+        .map(|sd| sd.monitor_count(&event_sender))
+        .collect::<Result<Vec<_>>>()?;
+    let unique_inputs =
+        get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)?
+            .monitor_count(&event_sender)?;
+    let preserve_existing_outputs = args
+        .get_one::<bool>(PRESERVE_EXISTING_OUTPUTS)
+        .copied()
+        .unwrap_or_default();
+
+    let config = Config {
+        target_exe,
+        target_env,
+        target_options,
+        input_queue,
+        inputs,
+        unique_inputs,
+        preserve_existing_outputs,
+        check_fuzzer_help,
+        common,
+    };
+
+    Ok(config)
+}
+
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, true, event_sender.clone()).await?;
+    let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?;
+    spawn(config).await
+}
+
+pub fn build_shared_args() -> Vec<Arg> {
+    vec![
+        Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
+        Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
+        Arg::new(TARGET_OPTIONS)
+            .long(TARGET_OPTIONS)
+            .value_delimiter(' ')
+            .help("Use a quoted string with space separation to denote multiple arguments"),
+        Arg::new(CHECK_FUZZER_HELP)
+            .action(ArgAction::SetTrue)
+            .long(CHECK_FUZZER_HELP),
+        Arg::new(INPUTS_DIR)
+            .long(INPUTS_DIR)
+            .value_parser(value_parser!(PathBuf))
+            .num_args(0..),
+    ]
+}
+
+pub fn args(name: &'static str) -> Command {
+    Command::new(name)
+        .about("execute a local-only libfuzzer merge task")
+        .args(&build_shared_args())
+}
+
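+// Note: with `input_queue` set to `None` (the local case above), `spawn` is
+// expected to perform a single corpus-merge pass with the libFuzzer target
+// (its merge mode) and write the deduplicated corpus to `unique_inputs`,
+// rather than polling a queue forever. This describes the task implementation,
+// which is not itself shown in this diff.
+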
 #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
 pub struct LibfuzzerMerge {
     target_exe: PathBuf,
diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs
index 3fbb9f0bd6..501d2385e2 100644
--- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs
+++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs
@@ -3,13 +3,145 @@
 use std::{collections::HashMap, path::PathBuf};
 
-use crate::tasks::{config::CommonConfig, utils::default_bool_true};
+use crate::{
+    local::common::{
+        build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
+        SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR,
+        CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE,
+        TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR,
+    },
+    tasks::{
+        config::CommonConfig,
+        regression::libfuzzer::{Config, LibFuzzerRegressionTask},
+        utils::default_bool_true,
+    },
+};
 use anyhow::Result;
 use async_trait::async_trait;
+use clap::{Arg, ArgAction, Command};
+use flume::Sender;
 use schemars::JsonSchema;
 
 use super::template::{RunContext, Template};
 
+const REPORT_NAMES: &str = "report_names";
+
+pub fn build_regression_config(
+    args: &clap::ArgMatches,
+    common: CommonConfig,
+    event_sender: Option<Sender<UiEvent>>,
+) -> Result<Config> {
+    let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
+    let target_env = get_cmd_env(CmdType::Target, args)?;
+    let target_options = get_cmd_arg(CmdType::Target, args);
+    let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
+    let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)?
+        .monitor_count(&event_sender)?;
+    let regression_reports =
+        get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)?
+            .monitor_count(&event_sender)?;
+    let check_retry_count = args
+        .get_one::<u64>(CHECK_RETRY_COUNT)
+        .copied()
+        .expect("has a default value");
+
+    let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+    let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+    let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args)
+        .ok()
+        .monitor_count(&event_sender)?;
+
+    let report_list: Option<Vec<String>> = args
+        .get_many::<String>(REPORT_NAMES)
+        .map(|x| x.cloned().collect());
+
+    let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
+
+    let config = Config {
+        target_exe,
+        target_env,
+        target_options,
+        target_timeout,
+        check_fuzzer_help,
+        check_retry_count,
+        crashes,
+        regression_reports,
+        reports,
+        no_repro,
+        unique_reports,
+        readonly_inputs: None,
+        report_list,
+        minimized_stack_depth: None,
+        common,
+    };
+    Ok(config)
+}
+
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, true, event_sender.clone()).await?;
+    let config = build_regression_config(args, context.common_config.clone(), event_sender)?;
+    LibFuzzerRegressionTask::new(config).run().await
+}
+
+pub fn build_shared_args(local_job: bool) -> Vec<Arg> {
+    let mut args = vec![
+        Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
+        Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
+        Arg::new(TARGET_OPTIONS)
+            .long(TARGET_OPTIONS)
+            .value_delimiter(' ')
+            .help("Use a quoted string with space separation to denote multiple arguments"),
+        Arg::new(COVERAGE_DIR)
+            .required(!local_job)
+            .long(COVERAGE_DIR)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(CHECK_FUZZER_HELP)
+            .action(ArgAction::SetTrue)
+            .long(CHECK_FUZZER_HELP),
+        Arg::new(TARGET_TIMEOUT)
+            .long(TARGET_TIMEOUT)
+            .value_parser(value_parser!(u64)),
+        Arg::new(CRASHES_DIR)
+            .long(CRASHES_DIR)
+            .required(true)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(REGRESSION_REPORTS_DIR)
+            .long(REGRESSION_REPORTS_DIR)
+            .required(local_job)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(REPORTS_DIR)
+            .long(REPORTS_DIR)
+            .required(false)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(NO_REPRO_DIR)
+            .long(NO_REPRO_DIR)
+            .required(false)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(UNIQUE_REPORTS_DIR)
+            .long(UNIQUE_REPORTS_DIR)
+            .value_parser(value_parser!(PathBuf))
+            .required(true),
+        Arg::new(CHECK_RETRY_COUNT)
+            .long(CHECK_RETRY_COUNT)
+            .value_parser(value_parser!(u64))
+            .default_value("0"),
+    ];
+    if local_job {
+        args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..))
+    }
+    args
+}
+
+pub fn args(name: &'static str) -> Command {
+    Command::new(name)
+        .about("execute a local-only libfuzzer regression task")
+        .args(&build_shared_args(true))
+}
+
 #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
 pub struct LibfuzzerRegression {
     target_exe: PathBuf,
diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs
index 5bef2347f7..9c6f16094e 100644
--- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs
+++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs
@@ -1,14 +1,97 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
+use crate::{
+    local::common::{
+        build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT,
+        TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT,
+    },
+    tasks::report::libfuzzer_report::{test_input, TestInputArgs},
+};
 use anyhow::Result;
 use async_trait::async_trait;
+use clap::{Arg, Command};
+use flume::Sender;
 use onefuzz::machine_id::MachineIdentity;
 use schemars::JsonSchema;
 use std::{collections::HashMap, path::PathBuf};
 
 use super::template::{RunContext, Template};
 
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, true, event_sender).await?;
+
+    let target_exe = args
+        .get_one::<PathBuf>(TARGET_EXE)
+        .expect("marked as required");
+    let target_env = get_cmd_env(CmdType::Target, args)?;
+    let target_options = get_cmd_arg(CmdType::Target, args);
+    let input = args
+        .get_one::<PathBuf>("input")
+        .expect("marked as required");
+    let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
+    let check_retry_count = args
+        .get_one::<u64>(CHECK_RETRY_COUNT)
+        .copied()
+        .expect("has a default value");
+
+    let extra_setup_dir = context.common_config.extra_setup_dir.as_deref();
+    let extra_output_dir = context
+        .common_config
+        .extra_output
+        .as_ref()
+        .map(|x| x.local_path.as_path());
+
+    let config = TestInputArgs {
+        target_exe: target_exe.as_path(),
+        target_env: &target_env,
+        target_options: &target_options,
+        input_url: None,
+        input: input.as_path(),
+        job_id: context.common_config.job_id,
+        task_id: context.common_config.task_id,
+        target_timeout,
+        check_retry_count,
+        setup_dir: &context.common_config.setup_dir,
+        extra_setup_dir,
+        extra_output_dir,
+        minimized_stack_depth: None,
+        machine_identity: context.common_config.machine_identity,
+    };
+
+    let result = test_input(config).await?;
+    println!("{}", serde_json::to_string_pretty(&result)?);
+    Ok(())
+}
+
+pub fn build_shared_args() -> Vec<Arg> {
+    vec![
+        Arg::new(TARGET_EXE).required(true),
+        Arg::new("input")
+            .required(true)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
+        Arg::new(TARGET_OPTIONS)
+            .default_value("{input}")
+            .long(TARGET_OPTIONS)
+            .value_delimiter(' ')
+            .help("Use a quoted string with space separation to denote multiple arguments"),
+        Arg::new(TARGET_TIMEOUT)
+            .long(TARGET_TIMEOUT)
+            .value_parser(value_parser!(u64)),
+        Arg::new(CHECK_RETRY_COUNT)
+            .long(CHECK_RETRY_COUNT)
+            .value_parser(value_parser!(u64))
+            .default_value("0"),
+    ]
+}
+
+pub fn args(name: &'static str) -> Command {
+    Command::new(name)
+        .about("test a libfuzzer application with a specific input")
+        .args(&build_shared_args())
+}
+
 #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
 pub struct LibfuzzerTestInput {
     input: PathBuf,
diff --git a/src/agent/onefuzz-task/src/local/mod.rs b/src/agent/onefuzz-task/src/local/mod.rs
index 385ff8ffcd..03d394bcdb 100644
--- a/src/agent/onefuzz-task/src/local/mod.rs
+++ b/src/agent/onefuzz-task/src/local/mod.rs
@@ -14,6 +14,7 @@ pub mod libfuzzer_fuzz;
 pub mod libfuzzer_merge;
 pub mod libfuzzer_regression;
 pub mod libfuzzer_test_input;
+pub mod radamsa;
 pub mod template;
 pub mod test_input;
 pub mod tui;
diff --git a/src/agent/onefuzz-task/src/local/radamsa.rs b/src/agent/onefuzz-task/src/local/radamsa.rs
new file mode 100644
index 0000000000..4d84de027a
--- /dev/null
+++ b/src/agent/onefuzz-task/src/local/radamsa.rs
@@ -0,0 +1,78 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use crate::{
+    local::{
+        common::{build_local_context, DirectoryMonitorQueue, UiEvent},
+        generic_crash_report::{build_report_config, build_shared_args as build_crash_args},
+        generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args},
+    },
+    tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask},
+};
+use anyhow::{Context, Result};
+use clap::Command;
+use flume::Sender;
+use onefuzz::utils::try_wait_all_join_handles;
+use std::collections::HashSet;
+use tokio::task::spawn;
+use uuid::Uuid;
+
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, true, event_sender.clone()).await?;
+    let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?;
+    let crash_dir = fuzz_config
+        .crashes
+        .remote_url()?
+        .as_file_path()
+        .ok_or_else(|| format_err!("invalid crash directory"))?;
+
+    tokio::fs::create_dir_all(&crash_dir)
+        .await
+        .with_context(|| {
+            format!(
+                "unable to create crashes directory: {}",
+                crash_dir.display()
+            )
+        })?;
+
+    let fuzzer = GeneratorTask::new(fuzz_config);
+    let fuzz_task = spawn(async move { fuzzer.run().await });
+
+    let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir)
+        .await
+        .context("directory monitor failed")?;
+    let report_config = build_report_config(
+        args,
+        Some(crash_report_input_monitor.queue_client),
+        CommonConfig {
+            task_id: Uuid::new_v4(),
+            ..context.common_config.clone()
+        },
+        event_sender,
+    )?;
+    let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await });
+
+    try_wait_all_join_handles(vec![
+        fuzz_task,
+        report_task,
+        crash_report_input_monitor.handle,
+    ])
+    .await?;
+
+    Ok(())
+}
+
+pub fn args(name: &'static str) -> Command {
+    let mut app = Command::new(name).about("run a local generator & crash reporting job");
+
+    let mut used = HashSet::new();
+    for args in &[build_fuzz_args(), build_crash_args()] {
+        for arg in args {
+            if used.insert(arg.get_id()) {
+                app = app.arg(arg);
+            }
+        }
+    }
+
+    app
+}
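The new radamsa.rs above shows the wiring pattern shared by these local jobs: a fuzzing task writes crashes into a directory, `DirectoryMonitorQueue` turns new files into queue messages, and a report task drains that queue under a fresh `task_id`. A condensed sketch of the shape (the hypothetical `FuzzTask` stands in for the concrete task type; error handling elided):

    // Watch the crash directory and expose it as a queue.
    let monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?;

    // Producer: the fuzzing task, writing into `crash_dir`.
    let fuzz = tokio::task::spawn(async move { FuzzTask::new(fuzz_config)?.run().await });

    // Consumer: a report task fed by the monitor's queue, with its own task id.
    let report_config = build_report_config(
        args,
        Some(monitor.queue_client),
        CommonConfig { task_id: Uuid::new_v4(), ..common.clone() },
        event_sender,
    )?;
    let report = tokio::task::spawn(async move { ReportTask::new(report_config).managed_run().await });

    // Surface the first failure from any of the three long-running futures.
    try_wait_all_join_handles(vec![fuzz, report, monitor.handle]).await?;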
diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json
index e5b00f6e17..0a1f128e67 100644
--- a/src/agent/onefuzz-task/src/local/schema.json
+++ b/src/agent/onefuzz-task/src/local/schema.json
@@ -126,6 +126,7 @@
         "analyzer_options",
         "target_exe",
         "target_options",
+        "tools",
         "type"
       ],
       "properties": {
@@ -181,10 +182,7 @@
           }
         },
         "tools": {
-          "type": [
-            "string",
-            "null"
-          ]
+          "type": "string"
         },
         "type": {
           "type": "string",
@@ -895,4 +893,4 @@
     ]
   }
 }
-}
\ No newline at end of file
+}
diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs
index 73ae6e5e48..b2e0c425ff 100644
--- a/src/agent/onefuzz-task/src/local/template.rs
+++ b/src/agent/onefuzz-task/src/local/template.rs
@@ -196,7 +196,6 @@ pub async fn launch(
         job_id: Uuid::new_v4(),
         instance_id: Uuid::new_v4(),
         heartbeat_queue: None,
-        job_result_queue: None,
         instance_telemetry_key: None,
         microsoft_telemetry_key: None,
         logs: None,
@@ -242,10 +241,12 @@ mod test {
             .expect("Couldn't find checked-in schema.json")
             .replace("\r\n", "\n");
 
-        if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") {
-            std::fs::write("src/local/new.schema.json", schema_str)
-                .expect("The schemas did not match but failed to write new schema to file.");
-            panic!("The checked-in local fuzzing schema did not match the generated schema. The generated schema can be found at src/local/new.schema.json");
-        }
+        println!("{}", schema_str);
+
+        assert_eq!(
+            schema_str.replace('\n', ""),
+            checked_in_schema.replace('\n', ""),
+            "The checked-in local fuzzing schema did not match the generated schema."
+        );
     }
 }
diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs
index b8027a7f41..4077bd08f8 100644
--- a/src/agent/onefuzz-task/src/local/test_input.rs
+++ b/src/agent/onefuzz-task/src/local/test_input.rs
@@ -1,8 +1,18 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
+use crate::{
+    local::common::{
+        build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG,
+        CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
+        TARGET_TIMEOUT,
+    },
+    tasks::report::generic::{test_input, TestInputArgs},
+};
 use anyhow::Result;
 use async_trait::async_trait;
+use clap::{Arg, ArgAction, Command};
+use flume::Sender;
 use onefuzz::machine_id::MachineIdentity;
 use schemars::JsonSchema;
 use std::{collections::HashMap, path::PathBuf};
@@ -10,6 +20,82 @@ use uuid::Uuid;
 
 use super::template::{RunContext, Template};
 
+pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
+    let context = build_local_context(args, false, event_sender).await?;
+
+    let target_exe = args
+        .get_one::<PathBuf>(TARGET_EXE)
+        .expect("is marked required");
+    let target_env = get_cmd_env(CmdType::Target, args)?;
+    let target_options = get_cmd_arg(CmdType::Target, args);
+    let input = args
+        .get_one::<PathBuf>("input")
+        .expect("is marked required");
+    let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
+    let check_retry_count = args
+        .get_one::<u64>(CHECK_RETRY_COUNT)
+        .copied()
+        .expect("has default value");
+    let check_asan_log = args.get_flag(CHECK_ASAN_LOG);
+    let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER);
+
+    let config = TestInputArgs {
+        target_exe: target_exe.as_path(),
+        target_env: &target_env,
+        target_options: &target_options,
+        input_url: None,
+        input: input.as_path(),
+        job_id: context.common_config.job_id,
+        task_id: context.common_config.task_id,
+        target_timeout,
+        check_retry_count,
+        setup_dir: &context.common_config.setup_dir,
+        extra_setup_dir: context.common_config.extra_setup_dir.as_deref(),
+        minimized_stack_depth: None,
+        check_asan_log,
+        check_debugger,
+        machine_identity: context.common_config.machine_identity.clone(),
+    };
+
+    let result = test_input(config).await?;
+    println!("{}", serde_json::to_string_pretty(&result)?);
+    Ok(())
+}
+
+pub fn build_shared_args() -> Vec<Arg> {
+    vec![
+        Arg::new(TARGET_EXE).required(true),
+        Arg::new("input")
+            .required(true)
+            .value_parser(value_parser!(PathBuf)),
+        Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
+        Arg::new(TARGET_OPTIONS)
+            .default_value("{input}")
+            .long(TARGET_OPTIONS)
+            .value_delimiter(' ')
+            .help("Use a quoted string with space separation to denote multiple arguments"),
+        Arg::new(TARGET_TIMEOUT)
+            .long(TARGET_TIMEOUT)
+            .value_parser(value_parser!(u64)),
+        Arg::new(CHECK_RETRY_COUNT)
+            .long(CHECK_RETRY_COUNT)
+            .value_parser(value_parser!(u64))
+            .default_value("0"),
+        Arg::new(CHECK_ASAN_LOG)
+            .action(ArgAction::SetTrue)
+            .long(CHECK_ASAN_LOG),
+        Arg::new(DISABLE_CHECK_DEBUGGER)
+            .action(ArgAction::SetTrue)
+            .long("disable_check_debugger"),
+    ]
+}
+
+pub fn args(name: &'static str) -> Command {
+    Command::new(name)
+        .about("test an application with a specific input")
+        .args(&build_shared_args())
+}
+
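+// Note: `target_options` defaults to "{input}" above. When the tester runs
+// the target, that placeholder is expanded to the path of the input under
+// test (an assumption about the expansion step, which is not shown in this
+// diff), so plain `./app path/to/input` harnesses need no extra options.
+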
 #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
 pub struct TestInput {
     input: PathBuf,
diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs
index 05c6c3d169..3ba068a614 100644
--- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs
@@ -65,8 +65,6 @@ pub async fn run(config: Config) -> Result<()> {
         tools.init_pull().await?;
     }
 
-    let job_result_client = config.common.init_job_result().await?;
-
     // the tempdir is always created, however, the reports_path and
     // reports_monitor_future are only created if we have one of the three
     // report SyncedDir. The idea is that the option for where to write reports
@@ -90,7 +88,6 @@ pub async fn run(config: Config) -> Result<()> {
                 &config.unique_reports,
                 &config.reports,
                 &config.no_repro,
-                &job_result_client,
             );
             (
                 Some(reports_dir.path().to_path_buf()),
@@ -174,7 +171,7 @@ async fn poll_inputs(
             }
             message.delete().await?;
         } else {
-            debug!("no new candidate inputs found, sleeping");
+            warn!("no new candidate inputs found, sleeping");
            delay_with_jitter(EMPTY_QUEUE_DELAY).await;
         }
     }
diff --git a/src/agent/onefuzz-task/src/tasks/config.rs b/src/agent/onefuzz-task/src/tasks/config.rs
index e29e0fd60d..0848379d73 100644
--- a/src/agent/onefuzz-task/src/tasks/config.rs
+++ b/src/agent/onefuzz-task/src/tasks/config.rs
@@ -14,7 +14,6 @@ use onefuzz::{
     machine_id::MachineIdentity,
     syncdir::{SyncOperation, SyncedDir},
 };
-use onefuzz_result::job_result::{init_job_result, TaskJobResultClient};
 use onefuzz_telemetry::{
     self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey,
     Role,
@@ -51,8 +50,6 @@ pub struct CommonConfig {
 
     pub heartbeat_queue: Option<Url>,
 
-    pub job_result_queue: Option<Url>,
-
     pub instance_telemetry_key: Option<InstanceTelemetryKey>,
 
     pub microsoft_telemetry_key: Option<MicrosoftTelemetryKey>,
 
@@ -106,23 +103,6 @@ impl CommonConfig {
             None => Ok(None),
         }
     }
-
-    pub async fn init_job_result(&self) -> Result<Option<TaskJobResultClient>> {
-        match &self.job_result_queue {
-            Some(url) => {
-                let result = init_job_result(
-                    url.clone(),
-                    self.task_id,
-                    self.job_id,
-                    self.machine_identity.machine_id,
-                    self.machine_identity.machine_name.clone(),
-                )
-                .await?;
-                Ok(Some(result))
-            }
-            None => Ok(None),
-        }
-    }
 }
 
 #[derive(Debug, Deserialize)]
diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs
index 4fde9efb31..b112cfefbe 100644
--- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs
@@ -26,8 +26,6 @@ use onefuzz_file_format::coverage::{
     binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson},
     source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson},
 };
-use onefuzz_result::job_result::JobResultData;
-use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient};
 use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData};
 use storage_queue::{Message, QueueClient};
 use tokio::fs;
@@ -116,7 +114,7 @@ impl CoverageTask {
         let allowlist = self.load_target_allowlist().await?;
 
         let heartbeat = self.config.common.init_heartbeat(None).await?;
-        let job_result = self.config.common.init_job_result().await?;
+
         let mut seen_inputs = false;
 
         let target_exe_path =
@@ -131,7 +129,6 @@ impl CoverageTask {
             coverage,
             allowlist,
             heartbeat,
-            job_result,
            target_exe.to_string(),
         )?;
 
@@ -222,7 +219,6 @@ struct TaskContext<'a> {
     module_allowlist: AllowList,
     source_allowlist: Arc<AllowList>,
     heartbeat: Option<TaskHeartbeatClient>,
-    job_result: Option<TaskJobResultClient>,
     cache: Arc<DebugInfoCache>,
 }
 
@@ -232,7 +228,6 @@ impl<'a> TaskContext<'a> {
         coverage: BinaryCoverage,
         allowlist: TargetAllowList,
         heartbeat: Option<TaskHeartbeatClient>,
-        job_result: Option<TaskJobResultClient>,
         target_exe: String,
     ) -> Result<Self> {
         let cache = DebugInfoCache::new(allowlist.source_files.clone());
@@ -252,7 +247,6 @@ impl<'a> TaskContext<'a> {
             module_allowlist: allowlist.modules,
             source_allowlist: Arc::new(allowlist.source_files),
             heartbeat,
-            job_result,
             cache: Arc::new(cache),
         })
     }
@@ -461,16 +455,7 @@ impl<'a> TaskContext<'a> {
         let s = CoverageStats::new(&coverage);
         event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate);
         metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate);
-        self.job_result
-            .send_direct(
-                JobResultData::CoverageData,
-                HashMap::from([
-                    ("covered".to_string(), s.covered as f64),
-                    ("features".to_string(), s.features as f64),
-                    ("rate".to_string(), s.rate),
-                ]),
-            )
-            .await;
+
         Ok(())
     }
 
diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
index bd7511cac2..d9116a1ed2 100644
--- a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
+++ b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs
@@ -73,7 +73,6 @@ impl GeneratorTask {
         }
 
         let hb_client = self.config.common.init_heartbeat(None).await?;
-        let jr_client = self.config.common.init_job_result().await?;
 
         for dir in &self.config.readonly_inputs {
             dir.init_pull().await?;
@@ -85,10 +84,7 @@ impl GeneratorTask {
             self.config.ensemble_sync_delay,
         );
 
-        let crash_dir_monitor = self
-            .config
-            .crashes
-            .monitor_results(new_result, false, &jr_client);
+        let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false);
 
         let fuzzer = self.fuzzing_loop(hb_client);
 
@@ -302,7 +298,6 @@ mod tests {
             task_id: Default::default(),
             instance_id: Default::default(),
            heartbeat_queue: Default::default(),
-            job_result_queue: Default::default(),
             instance_telemetry_key: Default::default(),
             microsoft_telemetry_key: Default::default(),
             logs: Default::default(),
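With the job-result plumbing gone, `monitor_results` takes just the telemetry event and a dot-file filter, and the libfuzzer fuzzing loop in the next file composes it with its other long-running futures. A minimal sketch of that composition (simplified from the code below):

    // Each future runs for the life of the task; try_join! returns as soon as
    // any one of them fails, dropping (and thereby cancelling) the others.
    let resync = self.continuous_sync_inputs();
    let new_inputs = self.config.inputs.monitor_results(new_coverage, true);
    let new_crashes = self.config.crashes.monitor_results(new_result, true);
    let fuzzers = self.run_fuzzers(Some(&stats_sender));
    futures::try_join!(resync, new_inputs, new_crashes, fuzzers)?;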
diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
index bfd9f3f5cc..4f8c67ae8e 100644
--- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
+++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs
@@ -1,11 +1,7 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
-use crate::tasks::{
-    config::CommonConfig,
-    heartbeat::{HeartbeatSender, TaskHeartbeatClient},
-    utils::default_bool_true,
-};
+use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true};
 use anyhow::{Context, Result};
 use arraydeque::{ArrayDeque, Wrapping};
 use async_trait::async_trait;
@@ -16,7 +12,6 @@ use onefuzz::{
     process::ExitStatus,
     syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir},
 };
-use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
 use onefuzz_telemetry::{
     Event::{new_coverage, new_crashdump, new_result, runtime_stats},
     EventData,
@@ -131,31 +126,21 @@ where
         self.verify().await?;
 
         let hb_client = self.config.common.init_heartbeat(None).await?;
-        let jr_client = self.config.common.init_job_result().await?;
 
         // To be scheduled.
         let resync = self.continuous_sync_inputs();
-
-        let new_inputs = self
-            .config
-            .inputs
-            .monitor_results(new_coverage, true, &jr_client);
-        let new_crashes = self
-            .config
-            .crashes
-            .monitor_results(new_result, true, &jr_client);
+        let new_inputs = self.config.inputs.monitor_results(new_coverage, true);
+        let new_crashes = self.config.crashes.monitor_results(new_result, true);
 
         let new_crashdumps = async {
             if let Some(crashdumps) = &self.config.crashdumps {
-                crashdumps
-                    .monitor_results(new_crashdump, true, &jr_client)
-                    .await
+                crashdumps.monitor_results(new_crashdump, true).await
             } else {
                Ok(())
             }
         };
 
         let (stats_sender, stats_receiver) = mpsc::unbounded_channel();
-        let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client);
+        let report_stats = report_runtime_stats(stats_receiver, hb_client);
         let fuzzers = self.run_fuzzers(Some(&stats_sender));
         futures::try_join!(
             resync,
@@ -198,7 +183,7 @@ where
             .inputs
             .local_path
             .parent()
-            .ok_or_else(|| anyhow!("invalid input path"))?;
+            .ok_or_else(|| anyhow!("Invalid input path"))?;
         let temp_path = task_dir.join(".temp");
         tokio::fs::create_dir_all(&temp_path).await?;
         let temp_dir = tempdir_in(temp_path)?;
@@ -516,7 +501,7 @@ impl TotalStats {
         self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum();
     }
 
-    async fn report(&self, jr_client: &Option<TaskJobResultClient>) {
+    fn report(&self) {
         event!(
             runtime_stats;
             EventData::Count = self.count,
@@ -528,17 +513,6 @@ impl TotalStats {
             EventData::Count = self.count,
             EventData::ExecsSecond = self.execs_sec
         );
-        if let Some(jr_client) = jr_client {
-            let _ = jr_client
-                .send_direct(
-                    JobResultData::RuntimeStats,
-                    HashMap::from([
-                        ("total_count".to_string(), self.count as f64),
-                        ("execs_sec".to_string(), self.execs_sec),
-                    ]),
-                )
-                .await;
-        }
     }
 }
 
@@ -568,8 +542,7 @@ impl Timer {
 // are approximating nearest-neighbor interpolation on the runtime stats time series.
 async fn report_runtime_stats(
     mut stats_channel: mpsc::UnboundedReceiver<RuntimeStats>,
-    heartbeat_client: &Option<TaskHeartbeatClient>,
-    jr_client: &Option<TaskJobResultClient>,
+    heartbeat_client: impl HeartbeatSender,
 ) -> Result<()> {
     // Cache the last-reported stats for a given worker.
     //
 
@@ -578,7 +551,7 @@ async fn report_runtime_stats(
     let mut total = TotalStats::default();
 
     // report all zeros to start
-    total.report(jr_client).await;
+    total.report();
 
     let timer = Timer::new(RUNTIME_STATS_PERIOD);
 
@@ -587,10 +560,10 @@ async fn report_runtime_stats(
             Some(stats) = stats_channel.recv() => {
                 heartbeat_client.alive();
                 total.update(stats);
-                total.report(jr_client).await
+                total.report()
             }
             _ = timer.wait() => {
-                total.report(jr_client).await
+                total.report()
             }
         }
     }
diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs
index 3f00e20b8d..de1e1106ba 100644
--- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs
+++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs
@@ -79,10 +79,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
         remote_path: config.crashes.remote_path.clone(),
     };
     crashes.init().await?;
-
-    let jr_client = config.common.init_job_result().await?;
-
-    let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client);
+    let monitor_crashes = crashes.monitor_results(new_result, false);
 
     // setup crashdumps
     let (crashdump_dir, monitor_crashdumps) = {
@@ -98,12 +95,9 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
     };
 
     let monitor_dir = crashdump_dir.clone();
-    let monitor_jr_client = config.common.init_job_result().await?;
     let monitor_crashdumps = async move {
         if let Some(crashdumps) = monitor_dir {
-            crashdumps
-                .monitor_results(new_crashdump, false, &monitor_jr_client)
-                .await
+            crashdumps.monitor_results(new_crashdump, false).await
         } else {
             Ok(())
         }
@@ -135,13 +129,11 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
     if let Some(no_repro) = &config.no_repro {
         no_repro.init().await?;
     }
-
     let monitor_reports_future = monitor_reports(
         reports_dir.path(),
         &config.unique_reports,
         &config.reports,
         &config.no_repro,
-        &jr_client,
     );
 
     let inputs = SyncedDir {
@@ -164,7 +156,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
             delay_with_jitter(delay).await;
         }
     }
-    let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client);
+    let monitor_inputs = inputs.monitor_results(new_coverage, false);
     let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled
     let inputs_sync_task =
         inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation);
@@ -452,7 +444,6 @@ mod tests {
             task_id: Default::default(),
             instance_id: Default::default(),
             heartbeat_queue: Default::default(),
-            job_result_queue: Default::default(),
             instance_telemetry_key: Default::default(),
             microsoft_telemetry_key: Default::default(),
             logs: Default::default(),
diff --git a/src/agent/onefuzz-task/src/tasks/heartbeat.rs b/src/agent/onefuzz-task/src/tasks/heartbeat.rs
index e13b661909..515fa39d0c 100644
--- a/src/agent/onefuzz-task/src/tasks/heartbeat.rs
+++ b/src/agent/onefuzz-task/src/tasks/heartbeat.rs
@@ -1,8 +1,8 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
+use crate::onefuzz::heartbeat::HeartbeatClient;
 use anyhow::Result;
-use onefuzz::heartbeat::HeartbeatClient;
 use reqwest::Url;
 use serde::{self, Deserialize, Serialize};
 use std::time::Duration;
diff --git a/src/agent/onefuzz-task/src/tasks/merge/generic.rs b/src/agent/onefuzz-task/src/tasks/merge/generic.rs
index 3b6a2094d8..4f2e8234a8 100644
--- a/src/agent/onefuzz-task/src/tasks/merge/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/merge/generic.rs
@@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> {
                 }
             }
         } else {
-            debug!("no new candidate inputs found, sleeping");
+            warn!("no new candidate inputs found, sleeping");
             delay_with_jitter(EMPTY_QUEUE_DELAY).await;
         };
     }
diff --git a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs
index 2d53bc8c07..1c334b3f18 100644
--- a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs
+++ b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs
@@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<()
         }
         Ok(())
     } else {
-        debug!("no new candidate inputs found, sleeping");
+        warn!("no new candidate inputs found, sleeping");
         delay_with_jitter(EMPTY_QUEUE_DELAY).await;
         Ok(())
     }
diff --git a/src/agent/onefuzz-task/src/tasks/regression/common.rs b/src/agent/onefuzz-task/src/tasks/regression/common.rs
index b61a97df4c..60023cfa6e 100644
--- a/src/agent/onefuzz-task/src/tasks/regression/common.rs
+++ b/src/agent/onefuzz-task/src/tasks/regression/common.rs
@@ -2,14 +2,12 @@
 // Licensed under the MIT License.
 
 use crate::tasks::{
-    config::CommonConfig,
     heartbeat::{HeartbeatSender, TaskHeartbeatClient},
     report::crash_report::{parse_report_file, CrashTestResult, RegressionReport},
 };
 use anyhow::{Context, Result};
 use async_trait::async_trait;
 use onefuzz::syncdir::SyncedDir;
-use onefuzz_result::job_result::TaskJobResultClient;
 use reqwest::Url;
 use std::path::PathBuf;
 
@@ -26,7 +24,7 @@ pub trait RegressionHandler {
 
 /// Runs the regression task
 pub async fn run(
-    common_config: &CommonConfig,
+    heartbeat_client: Option<TaskHeartbeatClient>,
     regression_reports: &SyncedDir,
     crashes: &SyncedDir,
     report_dirs: &[&SyncedDir],
@@ -37,9 +35,6 @@ pub async fn run(
     info!("starting regression task");
     regression_reports.init().await?;
 
-    let heartbeat_client = common_config.init_heartbeat(None).await?;
-    let job_result_client = common_config.init_job_result().await?;
-
     handle_crash_reports(
         handler,
         crashes,
@@ -47,7 +42,6 @@ pub async fn run(
         report_list,
         regression_reports,
         &heartbeat_client,
-        &job_result_client,
     )
     .await
     .context("handling crash reports")?;
@@ -58,7 +52,6 @@ pub async fn run(
         readonly_inputs,
         regression_reports,
         &heartbeat_client,
-        &job_result_client,
     )
     .await
     .context("handling inputs")?;
@@ -78,7 +71,6 @@ pub async fn handle_inputs(
     readonly_inputs: &SyncedDir,
     regression_reports: &SyncedDir,
     heartbeat_client: &Option<TaskHeartbeatClient>,
-    job_result_client: &Option<TaskJobResultClient>,
 ) -> Result<()> {
     readonly_inputs.init_pull().await?;
     let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?;
@@ -103,7 +95,7 @@ pub async fn handle_inputs(
             crash_test_result,
            original_crash_test_result: None,
         }
-        .save(None, regression_reports, job_result_client)
+        .save(None, regression_reports)
         .await?
 }
 
@@ -117,7 +109,6 @@ pub async fn handle_crash_reports(
     report_list: &Option<Vec<String>>,
     regression_reports: &SyncedDir,
     heartbeat_client: &Option<TaskHeartbeatClient>,
-    job_result_client: &Option<TaskJobResultClient>,
 ) -> Result<()> {
     // without crash report containers, skip this method
     if report_dirs.is_empty() {
@@ -167,7 +158,7 @@ pub async fn handle_crash_reports(
             crash_test_result,
             original_crash_test_result: Some(original_crash_test_result),
         }
-        .save(Some(file_name), regression_reports, job_result_client)
+        .save(Some(file_name), regression_reports)
         .await?
     }
 }
diff --git a/src/agent/onefuzz-task/src/tasks/regression/generic.rs b/src/agent/onefuzz-task/src/tasks/regression/generic.rs
index 8570208d59..640e80db9a 100644
--- a/src/agent/onefuzz-task/src/tasks/regression/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/regression/generic.rs
@@ -89,6 +89,7 @@ impl GenericRegressionTask {
 
     pub async fn run(&self) -> Result<()> {
         info!("Starting generic regression task");
+        let heartbeat_client = self.config.common.init_heartbeat(None).await?;
 
         let mut report_dirs = vec![];
         for dir in vec![
@@ -102,7 +103,7 @@ impl GenericRegressionTask {
             report_dirs.push(dir);
         }
         common::run(
-            &self.config.common,
+            heartbeat_client,
             &self.config.regression_reports,
             &self.config.crashes,
             &report_dirs,
diff --git a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs
index e65f46bb64..06dd7c00d9 100644
--- a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs
+++ b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs
@@ -103,8 +103,9 @@ impl LibFuzzerRegressionTask {
             report_dirs.push(dir);
         }
 
+        let heartbeat_client = self.config.common.init_heartbeat(None).await?;
         common::run(
-            &self.config.common,
+            heartbeat_client,
            &self.config.regression_reports,
             &self.config.crashes,
             &report_dirs,
diff --git a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs
index 290b98ccde..23171bc432 100644
--- a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs
+++ b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs
@@ -3,7 +3,6 @@
 
 use anyhow::{Context, Result};
 use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir};
-use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
 use onefuzz_telemetry::{
     Event::{
         new_report, new_unable_to_reproduce, new_unique_report, regression_report,
@@ -13,7 +12,6 @@ use onefuzz_telemetry::{
 };
 use serde::{Deserialize, Serialize};
 use stacktrace_parser::CrashLog;
-use std::collections::HashMap;
 use std::path::{Path, PathBuf};
 use uuid::Uuid;
 
@@ -113,7 +111,6 @@ impl RegressionReport {
         self,
         report_name: Option<String>,
         regression_reports: &SyncedDir,
-        jr_client: &Option<TaskJobResultClient>,
     ) -> Result<()> {
         let (event, name) = match &self.crash_test_result {
             CrashTestResult::CrashReport(report) => {
@@ -129,15 +126,6 @@ impl RegressionReport {
         if upload_or_save_local(&self, &name, regression_reports).await? {
             event!(event; EventData::Path = name.clone());
             metric!(event; 1.0; EventData::Path = name.clone());
-
-            if let Some(jr_client) = jr_client {
-                let _ = jr_client
-                    .send_direct(
-                        JobResultData::NewRegressionReport,
-                        HashMap::from([("count".to_string(), 1.0)]),
-                    )
-                    .await;
-            }
         }
         Ok(())
     }
@@ -161,7 +149,6 @@ impl CrashTestResult {
         unique_reports: &Option<SyncedDir>,
         reports: &Option<SyncedDir>,
         no_repro: &Option<SyncedDir>,
-        jr_client: &Option<TaskJobResultClient>,
     ) -> Result<()> {
         match self {
             Self::CrashReport(report) => {
@@ -171,15 +158,6 @@ impl CrashTestResult {
                 if upload_or_save_local(&report, &name, unique_reports).await? {
                     event!(new_unique_report; EventData::Path = report.unique_blob_name());
                     metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name());
-
-                    if let Some(jr_client) = jr_client {
-                        let _ = jr_client
-                            .send_direct(
-                                JobResultData::NewUniqueReport,
-                                HashMap::from([("count".to_string(), 1.0)]),
-                            )
-                            .await;
-                    }
                 }
             }
 
@@ -188,15 +166,6 @@ impl CrashTestResult {
                 if upload_or_save_local(&report, &name, reports).await? {
                     event!(new_report; EventData::Path = report.blob_name());
                     metric!(new_report; 1.0; EventData::Path = report.blob_name());
-
-                    if let Some(jr_client) = jr_client {
-                        let _ = jr_client
-                            .send_direct(
-                                JobResultData::NewReport,
-                                HashMap::from([("count".to_string(), 1.0)]),
-                            )
-                            .await;
-                    }
                 }
             }
         }
@@ -207,15 +176,6 @@
                 if upload_or_save_local(&report, &name, no_repro).await? {
                     event!(new_unable_to_reproduce; EventData::Path = report.blob_name());
                     metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name());
-
-                    if let Some(jr_client) = jr_client {
-                        let _ = jr_client
-                            .send_direct(
-                                JobResultData::NoReproCrashingInput,
-                                HashMap::from([("count".to_string(), 1.0)]),
-                            )
-                            .await;
-                    }
                 }
             }
         }
@@ -364,7 +324,6 @@ pub async fn monitor_reports(
     unique_reports: &Option<SyncedDir>,
     reports: &Option<SyncedDir>,
     no_crash: &Option<SyncedDir>,
-    jr_client: &Option<TaskJobResultClient>,
 ) -> Result<()> {
     if unique_reports.is_none() && reports.is_none() && no_crash.is_none() {
         debug!("no report directories configured");
@@ -375,9 +334,7 @@ pub async fn monitor_reports(
 
     while let Some(file) = monitor.next_file().await? {
         let result = parse_report_file(file).await?;
-        result
-            .save(unique_reports, reports, no_crash, jr_client)
-            .await?;
+        result.save(unique_reports, reports, no_crash).await?;
     }
 
     Ok(())
diff --git a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs
index b8659845de..9b626a7d89 100644
--- a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs
@@ -8,25 +8,25 @@ use std::{
     sync::Arc,
 };
 
-use crate::tasks::report::crash_report::*;
-use crate::tasks::report::dotnet::common::collect_exception_info;
-use crate::tasks::{
-    config::CommonConfig,
-    generic::input_poller::*,
-    heartbeat::{HeartbeatSender, TaskHeartbeatClient},
-    utils::{default_bool_true, try_resolve_setup_relative_path},
-};
 use anyhow::{Context, Result};
 use async_trait::async_trait;
 use onefuzz::expand::Expand;
 use onefuzz::fs::set_executable;
 use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir};
-use onefuzz_result::job_result::TaskJobResultClient;
 use reqwest::Url;
 use serde::Deserialize;
 use storage_queue::{Message, QueueClient};
 use tokio::fs;
 
+use crate::tasks::report::crash_report::*;
+use crate::tasks::report::dotnet::common::collect_exception_info;
+use crate::tasks::{
+    config::CommonConfig,
+    generic::input_poller::*,
+    heartbeat::{HeartbeatSender, TaskHeartbeatClient},
+    utils::{default_bool_true, try_resolve_setup_relative_path},
+};
+
 const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump";
 
 #[derive(Debug, Deserialize)]
@@ -114,18 +114,15 @@ impl DotnetCrashReportTask {
 pub struct AsanProcessor {
     config: Arc<Config>,
     heartbeat_client: Option<TaskHeartbeatClient>,
-    job_result_client: Option<TaskJobResultClient>,
 }
 
 impl AsanProcessor {
     pub async fn new(config: Arc<Config>) -> Result<Self> {
         let heartbeat_client = config.common.init_heartbeat(None).await?;
-        let job_result_client = config.common.init_job_result().await?;
 
         Ok(Self {
             config,
            heartbeat_client,
-            job_result_client,
         })
     }
 
@@ -263,7 +260,6 @@ impl Processor for AsanProcessor {
             &self.config.unique_reports,
             &self.config.reports,
             &self.config.no_repro,
-            &self.job_result_client,
         )
         .await;
 
diff --git a/src/agent/onefuzz-task/src/tasks/report/generic.rs b/src/agent/onefuzz-task/src/tasks/report/generic.rs
index 8ad259f0a5..9088f98acc 100644
--- a/src/agent/onefuzz-task/src/tasks/report/generic.rs
+++ b/src/agent/onefuzz-task/src/tasks/report/generic.rs
@@ -13,7 +13,6 @@ use async_trait::async_trait;
 use onefuzz::{
     blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir,
 };
-use onefuzz_result::job_result::TaskJobResultClient;
 use reqwest::Url;
 use serde::Deserialize;
 use std::{
@@ -74,9 +73,7 @@ impl ReportTask {
     pub async fn managed_run(&mut self) -> Result<()> {
         info!("Starting generic crash report task");
         let heartbeat_client = self.config.common.init_heartbeat(None).await?;
-        let job_result_client = self.config.common.init_job_result().await?;
-        let mut processor =
-            GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client);
+        let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client);
 
         #[allow(clippy::manual_flatten)]
         for entry in [
@@ -186,19 +183,13 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result<CrashTestResult> {
 pub struct GenericReportProcessor<'a> {
     config: &'a Config,
     heartbeat_client: Option<TaskHeartbeatClient>,
-    job_result_client: Option<TaskJobResultClient>,
 }
 
 impl<'a> GenericReportProcessor<'a> {
-    pub fn new(
-        config: &'a Config,
-        heartbeat_client: Option<TaskHeartbeatClient>,
-        job_result_client: Option<TaskJobResultClient>,
-    ) -> Self {
+    pub fn new(config: &'a Config, heartbeat_client: Option<TaskHeartbeatClient>) -> Self {
         Self {
             config,
             heartbeat_client,
-            job_result_client,
         }
     }
 
@@ -248,7 +239,6 @@ impl<'a> Processor for GenericReportProcessor<'a> {
             &self.config.unique_reports,
             &self.config.reports,
             &self.config.no_repro,
-            &self.job_result_client,
         )
         .await
         .context("saving report failed")
diff --git a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs
index 587ed2e3dc..f18f638fa3 100644
--- a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs
+++ b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs
@@ -13,7 +13,6 @@ use async_trait::async_trait;
 use onefuzz::{
     blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir,
 };
-use onefuzz_result::job_result::TaskJobResultClient;
 use reqwest::Url;
 use serde::Deserialize;
 use std::{
@@ -197,18 +196,15 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result<CrashTestResult> {
 pub struct AsanProcessor {
     config: Arc<Config>,
     heartbeat_client: Option<TaskHeartbeatClient>,
-    job_result_client: Option<TaskJobResultClient>,
 }
 
 impl AsanProcessor {
     pub async fn new(config: Arc<Config>) -> Result<Self> {
         let heartbeat_client = config.common.init_heartbeat(None).await?;
-        let job_result_client = config.common.init_job_result().await?;
 
         Ok(Self {
             config,
             heartbeat_client,
-            job_result_client,
         })
     }
 
@@ -261,7 +257,6 @@ impl Processor for AsanProcessor {
             &self.config.unique_reports,
             &self.config.reports,
             &self.config.no_repro,
-            &self.job_result_client,
         )
         .await
     }
diff --git a/src/agent/onefuzz/Cargo.toml b/src/agent/onefuzz/Cargo.toml
index 1f3c27985c..c096c8ddfc 100644
--- a/src/agent/onefuzz/Cargo.toml
+++ b/src/agent/onefuzz/Cargo.toml
@@ -44,7 +44,6 @@ tempfile = "3.7.0"
 process_control = "4.0"
 reqwest-retry = { path = "../reqwest-retry" }
 onefuzz-telemetry = { path = "../onefuzz-telemetry" }
-onefuzz-result = { path = "../onefuzz-result" }
 stacktrace-parser = { path = "../stacktrace-parser" }
 backoff = { version = "0.4", features = ["tokio"] }
 
diff --git a/src/agent/onefuzz/src/blob/url.rs b/src/agent/onefuzz/src/blob/url.rs
index 134b59dea0..f55ffbb23a 100644
--- a/src/agent/onefuzz/src/blob/url.rs
+++ b/src/agent/onefuzz/src/blob/url.rs
@@ -192,15 +192,10 @@ impl BlobContainerUrl {
     }
 
     pub fn as_path(&self, prefix: impl AsRef<Path>) -> Result<PathBuf> {
-        match (self.account(), self.container()) {
-            (Some(account), Some(container)) => {
-                let mut path = PathBuf::new();
-                path.push(account);
-                path.push(container);
-                Ok(prefix.as_ref().join(path))
-            }
-            _ => bail!("Invalid container Url"),
-        }
+        let dir = self
+            .account()
+            .ok_or_else(|| anyhow!("Invalid container Url"))?;
+        Ok(prefix.as_ref().join(dir))
     }
 }
 
@@ -531,14 +526,4 @@ mod tests {
             "id:000000,sig:06,src:000000,op:havoc,rep:128"
         );
     }
-
-    #[test]
-    fn test_as_path() -> Result<()> {
-        let root = PathBuf::from(r"/onefuzz");
-        let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?;
-        let path = url.as_path(root)?;
-        assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path);
-
-        Ok(())
-    }
 }
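With the reshaped `as_path` above, only the storage-account segment contributes to the local layout; the container name is dropped from the mapping. Roughly (derived from the code above, not a checked-in test):

    let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?;
    // The account name is kept; the container name no longer appears.
    assert_eq!(url.as_path("/onefuzz")?, PathBuf::from("/onefuzz/myaccount"));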
diff --git a/src/agent/onefuzz/src/syncdir.rs b/src/agent/onefuzz/src/syncdir.rs
index 2e73b7a694..0252099561 100644
--- a/src/agent/onefuzz/src/syncdir.rs
+++ b/src/agent/onefuzz/src/syncdir.rs
@@ -11,12 +11,10 @@ use crate::{
 };
 use anyhow::{Context, Result};
 use dunce::canonicalize;
-use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
 use onefuzz_telemetry::{Event, EventData};
 use reqwest::{StatusCode, Url};
 use reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, MAX_RETRY_ATTEMPTS};
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
 use std::{env::current_dir, path::PathBuf, str, time::Duration};
 use tokio::{fs, select};
 use tokio_util::sync::CancellationToken;
@@ -243,7 +241,6 @@ impl SyncedDir {
         url: BlobContainerUrl,
         event: Event,
         ignore_dotfiles: bool,
-        jr_client: &Option<TaskJobResultClient>,
     ) -> Result<()> {
         debug!("monitoring {}", path.display());
 
@@ -268,39 +265,9 @@ impl SyncedDir {
                 if ignore_dotfiles && file_name_event_str.starts_with('.') {
                     continue;
                 }
+
                 event!(event.clone(); EventData::Path = file_name_event_str);
                 metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
-                if let Some(jr_client) = jr_client {
-                    match event {
-                        Event::new_result => {
-                            jr_client
-                                .send_direct(
-                                    JobResultData::NewCrashingInput,
-                                    HashMap::from([("count".to_string(), 1.0)]),
-                                )
-                                .await;
-                        }
-                        Event::new_coverage => {
-                            jr_client
-                                .send_direct(
-                                    JobResultData::CoverageData,
-                                    HashMap::from([("count".to_string(), 1.0)]),
-                                )
-                                .await;
-                        }
-                        Event::new_crashdump => {
-                            jr_client
-                                .send_direct(
-                                    JobResultData::NewCrashDump,
-                                    HashMap::from([("count".to_string(), 1.0)]),
-                                )
-                                .await;
-                        }
-                        _ => {
-                            warn!("Unhandled job result!");
-                        }
-                    }
-                }
                 let destination = path.join(file_name);
                 if let Err(err) = fs::copy(&item, &destination).await {
                     let error_message = format!(
@@ -338,29 +305,6 @@ impl SyncedDir {
                 event!(event.clone(); EventData::Path = file_name_event_str);
                 metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
 
-                if let Some(jr_client) = jr_client {
-                    match event {
-                        Event::new_result => {
-                            jr_client
-                                .send_direct(
-                                    JobResultData::NewCrashingInput,
-                                    HashMap::from([("count".to_string(), 1.0)]),
-                                )
-                                .await;
-                        }
-                        Event::new_coverage => {
-                            jr_client
-                                .send_direct(
-                                    JobResultData::CoverageData,
-                                    HashMap::from([("count".to_string(), 1.0)]),
-                                )
-                                .await;
-                        }
-                        _ => {
-                            warn!("Unhandled job result!");
-                        }
-                    }
-                }
                 if let Err(err) = uploader.upload(item.clone()).await {
                     let error_message = format!(
                         "Couldn't upload file. path:{} dir:{} err:{:?}",
@@ -392,12 +336,7 @@ impl SyncedDir {
     /// The intent of this is to support use cases where we usually want a directory
     /// to be initialized, but a user-supplied binary, (such as AFL) logically owns
     /// a directory, and may reset it.
-    pub async fn monitor_results(
-        &self,
-        event: Event,
-        ignore_dotfiles: bool,
-        job_result_client: &Option<TaskJobResultClient>,
-    ) -> Result<()> {
+    pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> {
         if let Some(url) = self.remote_path.clone() {
             loop {
                 debug!("waiting to monitor {}", self.local_path.display());
@@ -416,7 +355,6 @@ impl SyncedDir {
                     url.clone(),
                     event.clone(),
                     ignore_dotfiles,
-                    job_result_client,
                 )
                 .await?;
             }
diff --git a/src/deployment/bicep-templates/storageAccounts.bicep b/src/deployment/bicep-templates/storageAccounts.bicep
index 27f2da21d8..6a96cea6a0 100644
--- a/src/deployment/bicep-templates/storageAccounts.bicep
+++ b/src/deployment/bicep-templates/storageAccounts.bicep
@@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [
   'update-queue'
   'webhooks'
   'signalr-events'
-  'job-result'
+  'custom-metrics'
 ]
 var fileChangesQueueIndex = 0
diff --git a/src/integration-tests/integration-test.py b/src/integration-tests/integration-test.py
index 15ffcfb9fe..057404ceff 100755
--- a/src/integration-tests/integration-test.py
+++ b/src/integration-tests/integration-test.py
@@ -88,7 +88,6 @@ class Integration(BaseModel):
     target_method: Optional[str]
     setup_dir: Optional[str]
     target_env: Optional[Dict[str, str]]
-    pool: PoolName
 
 
 TARGETS: Dict[str, Integration] = {
@@ -98,7 +97,6 @@ class Integration(BaseModel):
         target_exe="fuzz.exe",
         inputs="seeds",
         wait_for_files={ContainerType.unique_reports: 1},
-        pool="linux",
     ),
     "linux-libfuzzer": Integration(
         template=TemplateType.libfuzzer,
@@ -126,7 +124,6 @@ class Integration(BaseModel):
             "--only_asan_failures",
             "--write_test_file={extra_output_dir}/test.txt",
         ],
-        pool="linux",
     ),
     "linux-libfuzzer-with-options": Integration(
         template=TemplateType.libfuzzer,
@@ -140,7 +137,6 @@ class Integration(BaseModel):
         },
         reboot_after_setup=True,
         fuzzing_target_options=["-runs=10000000"],
-        pool="linux",
     ),
     "linux-libfuzzer-dlopen": Integration(
         template=TemplateType.libfuzzer,
@@ -154,7 +150,6 @@ class Integration(BaseModel):
         },
         reboot_after_setup=True,
         use_setup=True,
-        pool="linux",
     ),
     "linux-libfuzzer-linked-library": Integration(
         template=TemplateType.libfuzzer,
@@ -168,7 +163,6 @@ class Integration(BaseModel):
         },
         reboot_after_setup=True,
         use_setup=True,
-        pool="linux",
     ),
     "linux-libfuzzer-dotnet": Integration(
         template=TemplateType.libfuzzer_dotnet,
@@ -186,7 +180,6 @@ class Integration(BaseModel):
             ContainerType.unique_reports: 1,
         },
         test_repro=False,
-        pool="linux",
     ),
     "linux-libfuzzer-aarch64-crosscompile": Integration(
         template=TemplateType.libfuzzer_qemu_user,
@@ -196,7 +189,6 @@ class Integration(BaseModel):
         use_setup=True,
         wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1},
         test_repro=False,
-        pool="linux",
     ),
     "linux-libfuzzer-rust": Integration(
         template=TemplateType.libfuzzer,
@@ -204,7 +196,6 @@ class Integration(BaseModel):
         target_exe="fuzz_target_1",
         wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1},
         fuzzing_target_options=["--test:{extra_setup_dir}"],
-        pool="linux",
     ),
     "linux-trivial-crash": Integration(
         template=TemplateType.radamsa,
@@ -213,7 +204,6 @@ class Integration(BaseModel):
         inputs="seeds",
         wait_for_files={ContainerType.unique_reports: 1},
         inject_fake_regression=True,
-        pool="linux",
     ),
     "linux-trivial-crash-asan": Integration(
         template=TemplateType.radamsa,
@@ -223,28 +213,6 @@ class Integration(BaseModel):
         wait_for_files={ContainerType.unique_reports: 1},
         check_asan_log=True,
         disable_check_debugger=True,
-        pool="linux",
     ),
-    # TODO: Don't install OMS extension on linux anymore
-    # TODO: Figure out why non mariner work is being scheduled to the mariner pool
-    "mariner-libfuzzer": Integration(
-        template=TemplateType.libfuzzer,
-        os=OS.linux,
-        target_exe="fuzz.exe",
-        inputs="seeds",
-        wait_for_files={
-            ContainerType.unique_reports: 1,
-            ContainerType.coverage: 1,
-            ContainerType.inputs: 2,
-            ContainerType.extra_output: 1,
-        },
-        reboot_after_setup=True,
-        inject_fake_regression=True,
-        fuzzing_target_options=[
-            "--test:{extra_setup_dir}",
-            "--write_test_file={extra_output_dir}/test.txt",
-        ],
-        pool=PoolName("mariner")
-    ),
     "windows-libfuzzer": Integration(
         template=TemplateType.libfuzzer,
@@ -266,7 +234,6 @@ class Integration(BaseModel):
             "--only_asan_failures",
             "--write_test_file={extra_output_dir}/test.txt",
         ],
-        pool="windows",
     ),
     "windows-libfuzzer-linked-library": Integration(
         template=TemplateType.libfuzzer,
@@ -279,7 +246,6 @@ class Integration(BaseModel):
             ContainerType.coverage: 1,
         },
         use_setup=True,
-        pool="windows",
     ),
     "windows-libfuzzer-load-library": Integration(
         template=TemplateType.libfuzzer,
@@ -292,7 +258,6 @@ class Integration(BaseModel):
             ContainerType.coverage: 1,
         },
         use_setup=True,
-        pool="windows",
     ),
     "windows-libfuzzer-dotnet": Integration(
         template=TemplateType.libfuzzer_dotnet,
@@ -310,7 +275,6 @@ class Integration(BaseModel):
             ContainerType.unique_reports: 1,
         },
         test_repro=False,
-        pool="windows",
     ),
     "windows-trivial-crash": Integration(
         template=TemplateType.radamsa,
@@ -319,7 +283,6 @@ class Integration(BaseModel):
         inputs="seeds",
         wait_for_files={ContainerType.unique_reports: 1},
         inject_fake_regression=True,
-        pool="windows",
     ),
 }
 
@@ -388,7 +351,7 @@ def try_info_get(data: Any) -> None:
         self.inject_log(self.start_log_marker)
         for entry in os_list:
-            name = self.build_pool_name(entry.name)
+            name = PoolName(f"testpool-{entry.name}-{self.test_id}")
             self.logger.info("creating pool: %s:%s", entry.name, name)
             self.of.pools.create(name, entry)
             self.logger.info("creating scaleset for pool: %s", name)
@@ -396,15 +359,6 @@ def try_info_get(data: Any) -> None:
                 name, pool_size, region=region, initial_size=pool_size
             )
 
-        name = self.build_pool_name("mariner")
-        self.logger.info("creating pool: %s:%s", "mariner", name)
-        self.of.pools.create(name, OS.linux)
-        self.logger.info("creating scaleset for pool: %s", name)
-        self.of.scalesets.create(
-            name, pool_size, region=region, initial_size=pool_size, image="MicrosoftCBLMariner:cbl-mariner:cbl-mariner-2-gen2:latest"
-        )
-
-
 class UnmanagedPool:
     def __init__(
         self,
@@ -606,9 +560,12 @@ def launch(
     ) -> List[UUID]:
         """Launch all of the fuzzing templates"""
 
-        pool = None
+        pools: Dict[OS, Pool] = {}
         if unmanaged_pool is not None:
-            pool = unmanaged_pool.pool_name
+            pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name)
+        else:
+            for pool in self.of.pools.list():
+                pools[pool.os] = pool
 
         job_ids = []
 
@@ -619,8 +576,8 @@ def launch(
             if config.os not in os_list:
                 continue
 
-            if pool is None:
-                pool = self.build_pool_name(config.pool)
+            if config.os not in pools.keys():
+                raise Exception(f"No pool for target: {target} ,os: {config.os}")
 
             self.logger.info("launching: %s", target)
 
@@ -644,9 +601,8 @@ def launch(
                 setup = Directory(os.path.join(setup, config.nested_setup_dir))
 
             job: Optional[Job] = None
-
             job = self.build_job(
-                duration, pool, target, config, setup, target_exe, inputs
+                duration, pools, target, config, setup, target_exe, inputs
             )
 
             if config.inject_fake_regression and job is not None:
@@ -662,7 +618,7 @@ def launch(
     def build_job(
         self,
         duration: int,
-        pool: PoolName,
diff --git a/src/runtime-tools/linux/setup.sh b/src/runtime-tools/linux/setup.sh
old mode 100644
new mode 100755
index 794e827f4d..f6859003b4
--- a/src/runtime-tools/linux/setup.sh
+++ b/src/runtime-tools/linux/setup.sh
@@ -18,14 +18,6 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT"
 export ONEFUZZ_ROOT=/onefuzz
 export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer
 
-# `logger` won't work on mariner unless we install this package first
-if type yum > /dev/null 2> /dev/null; then
-    until yum install -y util-linux sudo; do
-        echo "yum failed. sleep 10s, then retrying"
-        sleep 10
-    done
-fi
-
 logger "onefuzz: making directories"
 sudo mkdir -p /onefuzz/downloaded
 sudo chown -R $(whoami) /onefuzz
@@ -142,53 +134,31 @@ if type apt > /dev/null 2> /dev/null; then
         sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH
     fi
 
-    # Needed to install dotnet
+    # Install dotnet
     until sudo apt install -y curl libicu-dev; do
         logger "apt failed, sleeping 10s then retrying"
         sleep 10
     done
-elif type yum > /dev/null 2> /dev/null; then
-    until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do
-        echo "yum failed. sleep 10s, then retrying"
-        sleep 10
-    done
-
-    # Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi
-    yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo'
-    yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install'
+    logger "downloading dotnet install"
+    curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install'
+    chmod +x dotnet-install.sh
 
-    if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then
-        until yum install -y llvm-12.0.1; do
-            echo "yum failed, sleeping 10s then retrying"
-            sleep 10
-        done
-
-        # If specifying symbolizer, exe name must be a "known symbolizer".
-        # Using `llvm-symbolizer` works for clang 8 .. 12.
- sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH - fi + for version in "${DOTNET_VERSIONS[@]}"; do + logger "running dotnet install $version" + /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' + done + rm dotnet-install.sh + + logger "install dotnet tools" + pushd "$DOTNET_ROOT" + ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + "$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' + popd fi -# Install dotnet -logger "downloading dotnet install" -curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' -chmod +x dotnet-install.sh - -for version in "${DOTNET_VERSIONS[@]}"; do - logger "running dotnet install $version" - /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' -done -rm dotnet-install.sh - -logger "install dotnet tools" -pushd "$DOTNET_ROOT" -ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' -popd - if [ -v DOCKER_BUILD ]; then echo "building for docker" elif [ -d /etc/systemd/system ]; then
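The setup script leans on `until ...; do sleep 10; done` loops so that transient package-mirror failures do not abort provisioning. A rough Python equivalent of that retry pattern; the example command, 10s delay, and attempt cap are illustrative assumptions (the shell script itself retries without an upper bound):

# Sketch of the retry-until-success install pattern used throughout setup.sh.
import subprocess
import time
from typing import List


def install_with_retry(cmd: List[str], delay: float = 10.0, max_attempts: int = 30) -> None:
    # Keep retrying until the installer exits 0, mirroring the shell `until` loop;
    # unlike the script, cap attempts so a broken mirror cannot hang forever.
    for attempt in range(1, max_attempts + 1):
        if subprocess.run(cmd).returncode == 0:
            return
        print(f"attempt {attempt} failed, sleeping {delay}s then retrying")
        time.sleep(delay)
    raise RuntimeError(f"command failed after {max_attempts} attempts: {cmd}")


# e.g. install_with_retry(["sudo", "apt", "install", "-y", "curl", "libicu-dev"])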