diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4059b3d7c1..d3fcf050ed 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -13,6 +13,7 @@ "**/target/**": true }, "lldb.executable": "/usr/bin/lldb", + "dotnet.server.useOmnisharp": true, "omnisharp.enableEditorConfigSupport": true, "omnisharp.enableRoslynAnalyzers": true, "python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python", @@ -48,4 +49,4 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {} } -} +} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 456491b6c7..5f07124dd7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -550,9 +550,11 @@ jobs: mkdir -p artifacts/linux-libfuzzer mkdir -p artifacts/linux-libfuzzer-with-options + mkdir -p artifacts/mariner-libfuzzer (cd libfuzzer ; make ) cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer-with-options + cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer mkdir -p artifacts/linux-libfuzzer-regression (cd libfuzzer-regression ; make ) diff --git a/src/ApiService/ApiService/Functions/QueueJobResult.cs b/src/ApiService/ApiService/Functions/QueueJobResult.cs new file mode 100644 index 0000000000..d781a4d1e1 --- /dev/null +++ b/src/ApiService/ApiService/Functions/QueueJobResult.cs @@ -0,0 +1,60 @@ +using System.Text.Json; +using Microsoft.Azure.Functions.Worker; +using Microsoft.Extensions.Logging; +using Microsoft.OneFuzz.Service.OneFuzzLib.Orm; +namespace Microsoft.OneFuzz.Service.Functions; + + +public class QueueJobResult { + private readonly ILogger _log; + private readonly IOnefuzzContext _context; + + public QueueJobResult(ILogger logTracer, IOnefuzzContext context) { + _log = logTracer; + _context = context; + } + + [Function("QueueJobResult")] + public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) { + + var _tasks = _context.TaskOperations; + var _jobs = _context.JobOperations; + + _log.LogInformation("job result: {msg}", msg); + var jr = JsonSerializer.Deserialize(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}"); + + var task = await _tasks.GetByTaskId(jr.TaskId); + if (task == null) { + _log.LogWarning("invalid {TaskId}", jr.TaskId); + return; + } + + var job = await _jobs.Get(task.JobId); + if (job == null) { + _log.LogWarning("invalid {JobId}", task.JobId); + return; + } + + JobResultData? 
data = jr.Data;
+        if (data == null) {
+            _log.LogWarning($"job result data is empty, throwing out: {jr}");
+            return;
+        }
+
+        var jobResultType = data.Type;
+        _log.LogInformation($"job result data type: {jobResultType}");
+
+        Dictionary<string, double> value;
+        if (jr.Value.Count > 0) {
+            value = jr.Value;
+        } else {
+            _log.LogWarning($"job result data is empty, throwing out: {jr}");
+            return;
+        }
+
+        var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value);
+        if (!jobResult.IsOk) {
+            _log.LogError("failed to create or update with job result {JobId}", job.JobId);
+        }
+    }
+}
diff --git a/src/ApiService/ApiService/OneFuzzTypes/Model.cs b/src/ApiService/ApiService/OneFuzzTypes/Model.cs
index 67cbea39b6..23811e9fe0 100644
--- a/src/ApiService/ApiService/OneFuzzTypes/Model.cs
+++ b/src/ApiService/ApiService/OneFuzzTypes/Model.cs
@@ -33,6 +33,19 @@ public enum HeartbeatType {
     TaskAlive,
 }
 
+[SkipRename]
+public enum JobResultType {
+    NewCrashingInput,
+    NoReproCrashingInput,
+    NewReport,
+    NewUniqueReport,
+    NewRegressionReport,
+    NewCoverage,
+    NewCrashDump,
+    CoverageData,
+    RuntimeStats,
+}
+
 public record HeartbeatData(HeartbeatType Type);
 
 public record TaskHeartbeatEntry(
@@ -41,6 +54,16 @@ public record TaskHeartbeatEntry(
     Guid MachineId,
     HeartbeatData[] Data);
 
+public record JobResultData(JobResultType Type);
+
+public record TaskJobResultEntry(
+    Guid TaskId,
+    Guid? JobId,
+    Guid MachineId,
+    JobResultData Data,
+    Dictionary<string, double> Value
+    );
+
 public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data);
 
 public record NodeCommandStopIfFree();
@@ -895,6 +918,27 @@ public record SecretAddress(Uri Url) : ISecret {
 public record SecretData(ISecret Secret) {
 }
 
+public record JobResult(
+    [PartitionKey][RowKey] Guid JobId,
+    string Project,
+    string Name,
+    double NewCrashingInput = 0,
+    double NoReproCrashingInput = 0,
+    double NewReport = 0,
+    double NewUniqueReport = 0,
+    double NewRegressionReport = 0,
+    double NewCrashDump = 0,
+    double InstructionsCovered = 0,
+    double TotalInstructions = 0,
+    double CoverageRate = 0,
+    double IterationCount = 0
+) : EntityBase() {
+    public JobResult(Guid JobId, string Project, string Name) : this(
+        JobId: JobId,
+        Project: Project,
+        Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { }
+}
+
 public record JobConfig(
     string Project,
     string Name,
@@ -1061,6 +1105,7 @@ public record TaskUnitConfig(
     string? InstanceTelemetryKey,
     string? MicrosoftTelemetryKey,
     Uri HeartbeatQueue,
+    Uri JobResultQueue,
     Dictionary<string, string> Tags
 ) {
     public Uri? inputQueue { get; set; }
diff --git a/src/ApiService/ApiService/Program.cs b/src/ApiService/ApiService/Program.cs
index f425c00809..d5ee30b45e 100644
--- a/src/ApiService/ApiService/Program.cs
+++ b/src/ApiService/ApiService/Program.cs
@@ -118,6 +118,7 @@ public static async Async.Task Main() {
         .AddScoped()
         .AddScoped()
         .AddScoped()
+        .AddScoped<IJobResultOperations, JobResultOperations>()
         .AddScoped()
         .AddScoped()
         .AddScoped()
diff --git a/src/ApiService/ApiService/onefuzzlib/Config.cs b/src/ApiService/ApiService/onefuzzlib/Config.cs
index 71af317348..872cedbc01 100644
--- a/src/ApiService/ApiService/onefuzzlib/Config.cs
+++ b/src/ApiService/ApiService/onefuzzlib/Config.cs
@@ -71,6 +71,7 @@ private static BlobContainerSasPermissions ConvertPermissions(ContainerPermissio
             InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey,
             MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry,
             HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ??
                throw new Exception("unable to get heartbeat queue sas"),
+            JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get job result queue sas"),
             Tags: task.Config.Tags ?? new Dictionary<string, string>()
         );
 
diff --git a/src/ApiService/ApiService/onefuzzlib/Extension.cs b/src/ApiService/ApiService/onefuzzlib/Extension.cs
index 7995026eca..fbf62dd343 100644
--- a/src/ApiService/ApiService/onefuzzlib/Extension.cs
+++ b/src/ApiService/ApiService/onefuzzlib/Extension.cs
@@ -36,7 +36,9 @@ public async Async.Task<IList<VMExtensionWrapper>> GenericExtensions(AzureLocati
         var extensions = new List<VMExtensionWrapper>();
 
         var instanceConfig = await _context.ConfigOperations.Fetch();
-        extensions.Add(await MonitorExtension(region, vmOs));
+        if (vmOs == Os.Windows) {
+            extensions.Add(await MonitorExtension(region));
+        }
 
         var depenency = DependencyExtension(region, vmOs);
         if (depenency is not null) {
@@ -329,37 +331,21 @@ public async Async.Task<VMExtensionWrapper> AgentConfig(AzureLocation region, Os
             throw new NotSupportedException($"unsupported OS: {vmOs}");
     }
 
-    public async Async.Task<VMExtensionWrapper> MonitorExtension(AzureLocation region, Os vmOs) {
+    public async Async.Task<VMExtensionWrapper> MonitorExtension(AzureLocation region) {
         var settings = await _context.LogAnalytics.GetMonitorSettings();
         var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions);
         var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions);
 
-        if (vmOs == Os.Windows) {
-            return new VMExtensionWrapper {
-                Location = region,
-                Name = "OMSExtension",
-                TypePropertiesType = "MicrosoftMonitoringAgent",
-                Publisher = "Microsoft.EnterpriseCloud.Monitoring",
-                TypeHandlerVersion = "1.0",
-                AutoUpgradeMinorVersion = true,
-                Settings = new BinaryData(extensionSettings),
-                ProtectedSettings = new BinaryData(protectedExtensionSettings),
-                EnableAutomaticUpgrade = false
-            };
-        } else if (vmOs == Os.Linux) {
-            return new VMExtensionWrapper {
-                Location = region,
-                Name = "OmsAgentForLinux",
-                TypePropertiesType = "OmsAgentForLinux",
-                Publisher = "Microsoft.EnterpriseCloud.Monitoring",
-                TypeHandlerVersion = "1.0",
-                AutoUpgradeMinorVersion = true,
-                Settings = new BinaryData(extensionSettings),
-                ProtectedSettings = new BinaryData(protectedExtensionSettings),
-                EnableAutomaticUpgrade = false
-            };
-        } else {
-            throw new NotSupportedException($"unsupported os: {vmOs}");
-        }
+        return new VMExtensionWrapper {
+            Location = region,
+            Name = "OMSExtension",
+            TypePropertiesType = "MicrosoftMonitoringAgent",
+            Publisher = "Microsoft.EnterpriseCloud.Monitoring",
+            TypeHandlerVersion = "1.0",
+            AutoUpgradeMinorVersion = true,
+            Settings = new BinaryData(extensionSettings),
+            ProtectedSettings = new BinaryData(protectedExtensionSettings),
+            EnableAutomaticUpgrade = false
+        };
     }
diff --git a/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs
new file mode 100644
index 0000000000..1166cf91d4
--- /dev/null
+++ b/src/ApiService/ApiService/onefuzzlib/JobResultOperations.cs
@@ -0,0 +1,121 @@
+using ApiService.OneFuzzLib.Orm;
+using Microsoft.Extensions.Logging;
+using Polly;
+namespace Microsoft.OneFuzz.Service;
+
+public interface IJobResultOperations : IOrm<JobResult> {
+
+    Async.Task<JobResult?> GetJobResult(Guid jobId);
+    Async.Task<OneFuzzResultVoid> CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary<string, double> resultValue);
+
+}
+public class JobResultOperations : Orm<JobResult>, IJobResultOperations {
+
+    public JobResultOperations(ILogger<JobResultOperations> log, IOnefuzzContext context)
+        : base(log, context) {
+    }
+
+    public async Async.Task<JobResult?> GetJobResult(Guid jobId) {
+        return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync();
+    }
+
+    private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary<string, double> resultValue) {
+
+        var newResult = result;
+        double newValue;
+        switch (type) {
+            case JobResultType.NewCrashingInput:
+                newValue = result.NewCrashingInput + resultValue["count"];
+                newResult = result with { NewCrashingInput = newValue };
+                break;
+            case JobResultType.NewReport:
+                newValue = result.NewReport + resultValue["count"];
+                newResult = result with { NewReport = newValue };
+                break;
+            case JobResultType.NewUniqueReport:
+                newValue = result.NewUniqueReport + resultValue["count"];
+                newResult = result with { NewUniqueReport = newValue };
+                break;
+            case JobResultType.NewRegressionReport:
+                newValue = result.NewRegressionReport + resultValue["count"];
+                newResult = result with { NewRegressionReport = newValue };
+                break;
+            case JobResultType.NewCrashDump:
+                newValue = result.NewCrashDump + resultValue["count"];
+                newResult = result with { NewCrashDump = newValue };
+                break;
+            case JobResultType.CoverageData:
+                double newCovered = resultValue["covered"];
+                double newTotalCovered = resultValue["features"];
+                double newCoverageRate = resultValue["rate"];
+                newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate };
+                break;
+            case JobResultType.RuntimeStats:
+                double newTotalIterations = resultValue["total_count"];
+                newResult = result with { IterationCount = newTotalIterations };
+                break;
+            default:
+                _logTracer.LogWarning($"Invalid Field {type}.");
+                break;
+        }
+        _logTracer.LogInformation($"Attempting to log new result: {newResult}");
+        return newResult;
+    }
+
+    private async Async.Task<bool> TryUpdate(Job job, JobResultType resultType, Dictionary<string, double> resultValue) {
+        var jobId = job.JobId;
+
+        var jobResult = await GetJobResult(jobId);
+
+        if (jobResult == null) {
+            _logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId);
+
+            var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name);
+
+            jobResult = UpdateResult(entry, resultType, resultValue);
+
+            var r = await Insert(jobResult);
+            if (!r.IsOk) {
+                throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}");
+            }
+            _logTracer.LogInformation("created job result {JobId}", jobResult.JobId);
+        } else {
+            _logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId);
+
+            jobResult = UpdateResult(jobResult, resultType, resultValue);
+
+            var r = await Update(jobResult);
+            if (!r.IsOk) {
+                throw new InvalidOperationException($"failed to update job result {jobResult.JobId}");
+            }
+            _logTracer.LogInformation("updated job result {JobId}", jobResult.JobId);
+        }
+
+        return true;
+    }
+
+    public async Async.Task<OneFuzzResultVoid> CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary<string, double> resultValue) {
+
+        var job = await _context.JobOperations.Get(jobId);
+        if (job == null) {
+            return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job");
+        }
+
+        var success = false;
+        try {
+            _logTracer.LogInformation("attempt to update job result {JobId}", job.JobId);
+            var policy = Policy.Handle<InvalidOperationException>().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5));
+            await policy.ExecuteAsync(async () => {
+                success = await TryUpdate(job, resultType, resultValue);
+                _logTracer.LogInformation("attempt {success}", success);
+            });
+            return
OneFuzzResultVoid.Ok; + } catch (Exception e) { + return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] { + $"Unexpected failure when attempting to update job result for {job.JobId}", + $"Exception: {e}" + }); + } + } +} + diff --git a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs index d877bfddbb..03c6322663 100644 --- a/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs +++ b/src/ApiService/ApiService/onefuzzlib/OnefuzzContext.cs @@ -19,6 +19,7 @@ public interface IOnefuzzContext { IExtensions Extensions { get; } IIpOperations IpOperations { get; } IJobOperations JobOperations { get; } + IJobResultOperations JobResultOperations { get; } ILogAnalytics LogAnalytics { get; } INodeMessageOperations NodeMessageOperations { get; } INodeOperations NodeOperations { get; } @@ -83,6 +84,7 @@ public OnefuzzContext(IServiceProvider serviceProvider) { public IVmOperations VmOperations => _serviceProvider.GetRequiredService(); public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService(); public IJobOperations JobOperations => _serviceProvider.GetRequiredService(); + public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService(); public IScheduler Scheduler => _serviceProvider.GetRequiredService(); public IConfig Config => _serviceProvider.GetRequiredService(); public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService(); diff --git a/src/ApiService/IntegrationTests/Fakes/TestContext.cs b/src/ApiService/IntegrationTests/Fakes/TestContext.cs index c46ff5fce7..66d121e746 100644 --- a/src/ApiService/IntegrationTests/Fakes/TestContext.cs +++ b/src/ApiService/IntegrationTests/Fakes/TestContext.cs @@ -32,6 +32,7 @@ public TestContext(IHttpClientFactory httpClientFactory, OneFuzzLoggerProvider p TaskOperations = new TaskOperations(provider.CreateLogger(), Cache, this); NodeOperations = new NodeOperations(provider.CreateLogger(), this); JobOperations = new JobOperations(provider.CreateLogger(), this); + JobResultOperations = new JobResultOperations(provider.CreateLogger(), this); NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger(), this); TaskEventOperations = new TaskEventOperations(provider.CreateLogger(), this); NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger(), this); @@ -57,6 +58,7 @@ public Async.Task InsertAll(params EntityBase[] objs) Node n => NodeOperations.Insert(n), Pool p => PoolOperations.Insert(p), Job j => JobOperations.Insert(j), + JobResult jr => JobResultOperations.Insert(jr), Repro r => ReproOperations.Insert(r), Scaleset ss => ScalesetOperations.Insert(ss), NodeTasks nt => NodeTasksOperations.Insert(nt), @@ -84,6 +86,7 @@ public Async.Task InsertAll(params EntityBase[] objs) public ITaskOperations TaskOperations { get; } public IJobOperations JobOperations { get; } + public IJobResultOperations JobResultOperations { get; } public INodeOperations NodeOperations { get; } public INodeTasksOperations NodeTasksOperations { get; } public ITaskEventOperations TaskEventOperations { get; } diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index 6136357d65..eb35241201 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -2138,6 +2138,7 @@ dependencies = [ "log", "nix", "notify", + "onefuzz-result", "onefuzz-telemetry", "pete", "pretty_assertions", @@ -2212,6 +2213,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "onefuzz-result" +version = "0.2.0" +dependencies = [ + "anyhow", + 
"async-trait", + "log", + "onefuzz-telemetry", + "reqwest", + "serde", + "storage-queue", + "uuid", +] + [[package]] name = "onefuzz-task" version = "0.2.0" @@ -2241,6 +2256,7 @@ dependencies = [ "num_cpus", "onefuzz", "onefuzz-file-format", + "onefuzz-result", "onefuzz-telemetry", "path-absolutize", "pretty_assertions", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 2f4cea41a4..ce01ae880c 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,6 +10,7 @@ members = [ "onefuzz", "onefuzz-task", "onefuzz-agent", + "onefuzz-result", "onefuzz-file-format", "onefuzz-telemetry", "reqwest-retry", diff --git a/src/agent/onefuzz-agent/src/config.rs b/src/agent/onefuzz-agent/src/config.rs index 87edfb2c1b..fc623e72af 100644 --- a/src/agent/onefuzz-agent/src/config.rs +++ b/src/agent/onefuzz-agent/src/config.rs @@ -34,6 +34,8 @@ pub struct StaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -71,6 +73,8 @@ struct RawStaticConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_id: Uuid, #[serde(default = "default_as_true")] @@ -117,6 +121,7 @@ impl StaticConfig { microsoft_telemetry_key: config.microsoft_telemetry_key, instance_telemetry_key: config.instance_telemetry_key, heartbeat_queue: config.heartbeat_queue, + job_result_queue: config.job_result_queue, instance_id: config.instance_id, managed: config.managed, machine_identity, @@ -152,6 +157,12 @@ impl StaticConfig { None }; + let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") { + Some(Url::parse(&key)?) + } else { + None + }; + let instance_telemetry_key = if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") { Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?)) @@ -183,6 +194,7 @@ impl StaticConfig { instance_telemetry_key, microsoft_telemetry_key, heartbeat_queue, + job_result_queue, instance_id, managed: !is_unmanaged, machine_identity, diff --git a/src/agent/onefuzz-agent/src/log_uploader.rs b/src/agent/onefuzz-agent/src/log_uploader.rs index 6bccc0bef2..d424013421 100644 --- a/src/agent/onefuzz-agent/src/log_uploader.rs +++ b/src/agent/onefuzz-agent/src/log_uploader.rs @@ -210,32 +210,3 @@ async fn sync_file( blob_client.append_block(Body::from(f)).await?; Ok(len) } - -#[cfg(test)] -mod tests { - use std::io::Seek; - - use anyhow::Result; - use tokio::io::{AsyncReadExt, AsyncSeekExt}; - - #[allow(clippy::unused_io_amount)] - #[tokio::test] - #[ignore] - - async fn test_seek_behavior() -> Result<()> { - let path = "C:\\temp\\test.ps1"; - let mut std_file = std::fs::File::open(path)?; - std_file.seek(std::io::SeekFrom::Start(3))?; - - let mut tokio_file = tokio::fs::File::from_std(std_file); - - let buf = &mut [0u8; 5]; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - tokio_file.seek(std::io::SeekFrom::Start(0)).await?; - tokio_file.read(buf).await?; - println!("******** buf {:?}", buf); - - Ok(()) - } -} diff --git a/src/agent/onefuzz-agent/src/work.rs b/src/agent/onefuzz-agent/src/work.rs index b55d1d86a1..d0222744a7 100644 --- a/src/agent/onefuzz-agent/src/work.rs +++ b/src/agent/onefuzz-agent/src/work.rs @@ -91,7 +91,10 @@ impl WorkSet { pub fn setup_dir(&self) -> Result { let root = self.get_root_folder()?; - self.setup_url.as_path(root) + // Putting the setup container at the root for backward compatibility. 
+ // The path of setup folder can be used as part of the deduplication logic in the bug filing service + let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?; + self.setup_url.as_path(setup_root) } pub fn extra_setup_dir(&self) -> Result> { diff --git a/src/agent/onefuzz-result/Cargo.toml b/src/agent/onefuzz-result/Cargo.toml new file mode 100644 index 0000000000..7c7de6615c --- /dev/null +++ b/src/agent/onefuzz-result/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "onefuzz-result" +version = "0.2.0" +authors = ["fuzzing@microsoft.com"] +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +anyhow = { version = "1.0", features = ["backtrace"] } +async-trait = "0.1" +reqwest = "0.11" +serde = "1.0" +storage-queue = { path = "../storage-queue" } +uuid = { version = "1.4", features = ["serde", "v4"] } +onefuzz-telemetry = { path = "../onefuzz-telemetry" } +log = "0.4" + diff --git a/src/agent/onefuzz-result/src/job_result.rs b/src/agent/onefuzz-result/src/job_result.rs new file mode 100644 index 0000000000..b305eca2cb --- /dev/null +++ b/src/agent/onefuzz-result/src/job_result.rs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use anyhow::Result; +use async_trait::async_trait; +use onefuzz_telemetry::warn; +use reqwest::Url; +use serde::{self, Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use storage_queue::QueueClient; +use uuid::Uuid; + +#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] +#[serde(tag = "type")] +pub enum JobResultData { + NewCrashingInput, + NoReproCrashingInput, + NewReport, + NewUniqueReport, + NewRegressionReport, + NewCoverage, + NewCrashDump, + CoverageData, + RuntimeStats, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +struct JobResult { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, + data: JobResultData, + value: HashMap, +} + +#[derive(Clone)] +pub struct TaskContext { + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +} + +pub struct JobResultContext { + pub state: TaskContext, + pub queue_client: QueueClient, +} + +pub struct JobResultClient { + pub context: Arc>, +} + +impl JobResultClient { + pub fn init_job_result( + context: TaskContext, + queue_url: Url, + ) -> Result> + where + TaskContext: Send + Sync + 'static, + { + let context = Arc::new(JobResultContext { + state: context, + queue_client: QueueClient::new(queue_url)?, + }); + + Ok(JobResultClient { context }) + } +} + +pub type TaskJobResultClient = JobResultClient; + +pub async fn init_job_result( + queue_url: Url, + task_id: Uuid, + job_id: Uuid, + machine_id: Uuid, + machine_name: String, +) -> Result { + let hb = JobResultClient::init_job_result( + TaskContext { + task_id, + job_id, + machine_id, + machine_name, + }, + queue_url, + )?; + Ok(hb) +} + +#[async_trait] +pub trait JobResultSender { + async fn send_direct(&self, data: JobResultData, value: HashMap); +} + +#[async_trait] +impl JobResultSender for TaskJobResultClient { + async fn send_direct(&self, data: JobResultData, value: HashMap) { + let task_id = self.context.state.task_id; + let job_id = self.context.state.job_id; + let machine_id = self.context.state.machine_id; + let machine_name = self.context.state.machine_name.clone(); + + let _ = self + .context + .queue_client + .enqueue(JobResult { + task_id, + job_id, + machine_id, + machine_name, + data, + value, + }) + .await; + } +} + +#[async_trait] +impl JobResultSender for Option { + 
async fn send_direct(&self, data: JobResultData, value: HashMap) { + match self { + Some(client) => client.send_direct(data, value).await, + None => warn!("Failed to send Job Result message data from agent."), + } + } +} diff --git a/src/agent/onefuzz-result/src/lib.rs b/src/agent/onefuzz-result/src/lib.rs new file mode 100644 index 0000000000..dae666ca9a --- /dev/null +++ b/src/agent/onefuzz-result/src/lib.rs @@ -0,0 +1,4 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod job_result; diff --git a/src/agent/onefuzz-task/Cargo.toml b/src/agent/onefuzz-task/Cargo.toml index 4c6d48d8a7..4b3e8e8c43 100644 --- a/src/agent/onefuzz-task/Cargo.toml +++ b/src/agent/onefuzz-task/Cargo.toml @@ -47,6 +47,7 @@ serde_json = "1.0" serde_yaml = "0.9.21" onefuzz = { path = "../onefuzz" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } path-absolutize = "3.1" reqwest-retry = { path = "../reqwest-retry" } strum = "0.25" diff --git a/src/agent/onefuzz-task/src/local/cmd.rs b/src/agent/onefuzz-task/src/local/cmd.rs index f1df4002c0..cb800d445e 100644 --- a/src/agent/onefuzz-task/src/local/cmd.rs +++ b/src/agent/onefuzz-task/src/local/cmd.rs @@ -4,11 +4,7 @@ use super::{create_template, template}; #[cfg(any(target_os = "linux", target_os = "windows"))] use crate::local::coverage; -use crate::local::{ - common::add_common_config, generic_analysis, generic_crash_report, generic_generator, - libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression, - libfuzzer_test_input, radamsa, test_input, tui::TerminalUi, -}; +use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi}; use anyhow::{Context, Result}; use clap::{Arg, ArgAction, Command}; @@ -20,19 +16,9 @@ use tokio::{select, time::timeout}; #[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)] #[strum(serialize_all = "kebab-case")] enum Commands { - Radamsa, #[cfg(any(target_os = "linux", target_os = "windows"))] Coverage, LibfuzzerFuzz, - LibfuzzerMerge, - LibfuzzerCrashReport, - LibfuzzerTestInput, - LibfuzzerRegression, - Libfuzzer, - CrashReport, - Generator, - Analysis, - TestInput, Template, CreateTemplate, } @@ -68,23 +54,7 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> { match command { #[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::run(&sub_args, event_sender).await, - Commands::Radamsa => radamsa::run(&sub_args, event_sender).await, - Commands::LibfuzzerCrashReport => { - libfuzzer_crash_report::run(&sub_args, event_sender).await - } Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await, - Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, event_sender).await, - Commands::LibfuzzerTestInput => { - libfuzzer_test_input::run(&sub_args, event_sender).await - } - Commands::LibfuzzerRegression => { - libfuzzer_regression::run(&sub_args, event_sender).await - } - Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await, - Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await, - Commands::Generator => generic_generator::run(&sub_args, event_sender).await, - Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await, - Commands::TestInput => test_input::run(&sub_args, event_sender).await, Commands::Template => { let config = sub_args .get_one::("config") @@ -141,17 +111,7 @@ pub fn args(name: &'static str) -> Command { let app = match subcommand { 
#[cfg(any(target_os = "linux", target_os = "windows"))] Commands::Coverage => coverage::args(subcommand.into()), - Commands::Radamsa => radamsa::args(subcommand.into()), - Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()), Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()), - Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()), - Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()), - Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()), - Commands::Libfuzzer => libfuzzer::args(subcommand.into()), - Commands::CrashReport => generic_crash_report::args(subcommand.into()), - Commands::Generator => generic_generator::args(subcommand.into()), - Commands::Analysis => generic_analysis::args(subcommand.into()), - Commands::TestInput => test_input::args(subcommand.into()), Commands::Template => Command::new("template") .about("uses the template to generate a run") .args(vec![Arg::new("config") diff --git a/src/agent/onefuzz-task/src/local/common.rs b/src/agent/onefuzz-task/src/local/common.rs index f8d7949e80..17940d799f 100644 --- a/src/agent/onefuzz-task/src/local/common.rs +++ b/src/agent/onefuzz-task/src/local/common.rs @@ -26,20 +26,10 @@ pub const INPUTS_DIR: &str = "inputs_dir"; pub const CRASHES_DIR: &str = "crashes_dir"; pub const CRASHDUMPS_DIR: &str = "crashdumps_dir"; pub const TARGET_WORKERS: &str = "target_workers"; -pub const REPORTS_DIR: &str = "reports_dir"; -pub const NO_REPRO_DIR: &str = "no_repro_dir"; pub const TARGET_TIMEOUT: &str = "target_timeout"; -pub const CHECK_RETRY_COUNT: &str = "check_retry_count"; -pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue"; -pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir"; pub const COVERAGE_DIR: &str = "coverage_dir"; pub const READONLY_INPUTS: &str = "readonly_inputs_dir"; -pub const CHECK_ASAN_LOG: &str = "check_asan_log"; -pub const TOOLS_DIR: &str = "tools_dir"; -pub const RENAME_OUTPUT: &str = "rename_output"; pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help"; -pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger"; -pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir"; pub const TARGET_EXE: &str = "target_exe"; pub const TARGET_ENV: &str = "target_env"; @@ -47,17 +37,6 @@ pub const TARGET_OPTIONS: &str = "target_options"; // pub const SUPERVISOR_EXE: &str = "supervisor_exe"; // pub const SUPERVISOR_ENV: &str = "supervisor_env"; // pub const SUPERVISOR_OPTIONS: &str = "supervisor_options"; -pub const GENERATOR_EXE: &str = "generator_exe"; -pub const GENERATOR_ENV: &str = "generator_env"; -pub const GENERATOR_OPTIONS: &str = "generator_options"; - -pub const ANALYZER_EXE: &str = "analyzer_exe"; -pub const ANALYZER_OPTIONS: &str = "analyzer_options"; -pub const ANALYZER_ENV: &str = "analyzer_env"; -pub const ANALYSIS_DIR: &str = "analysis_dir"; -pub const ANALYSIS_INPUTS: &str = "analysis_inputs"; -pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs"; -pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs"; pub const CREATE_JOB_DIR: &str = "create_job_dir"; @@ -66,7 +45,6 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1); pub enum CmdType { Target, - Generator, // Supervisor, } @@ -90,7 +68,6 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result let name = match cmd_type { CmdType::Target => TARGET_EXE, // CmdType::Supervisor => SUPERVISOR_EXE, - CmdType::Generator => GENERATOR_EXE, }; args.get_one::(name) @@ 
-102,7 +79,6 @@ pub fn get_cmd_arg(cmd_type: CmdType, args: &clap::ArgMatches) -> Vec { let name = match cmd_type { CmdType::Target => TARGET_OPTIONS, // CmdType::Supervisor => SUPERVISOR_OPTIONS, - CmdType::Generator => GENERATOR_OPTIONS, }; args.get_many::(name) @@ -115,7 +91,6 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result TARGET_ENV, // CmdType::Supervisor => SUPERVISOR_ENV, - CmdType::Generator => GENERATOR_ENV, }; get_hash_map(args, env_name) } @@ -265,6 +240,7 @@ pub async fn build_local_context( }, instance_telemetry_key: None, heartbeat_queue: None, + job_result_queue: None, microsoft_telemetry_key: None, logs: None, min_available_memory_mb: 0, diff --git a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml index 7210893809..aba02c7991 100644 --- a/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml +++ b/src/agent/onefuzz-task/src/local/example_templates/libfuzzer_basic.yml @@ -5,28 +5,31 @@ # 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh +required_args: &required_args + target_exe: "REPLACE_ME" # The path to your target + inputs: &inputs "REPLACE_ME" # A folder containining your inputs + crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output + crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output + coverage: "REPLACE_ME" # The folder where you want the code coverage to be output + regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output + target_args: &target_args + <<: *required_args target_env: {} - target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe" target_options: [] -inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds" - tasks: - type: LibFuzzer <<: *target_args - inputs: *inputs - crashes: &crash "./crashes" readonly_inputs: [] check_fuzzer_help: true - - type: "Report" + - type: LibfuzzerRegression <<: *target_args - input_queue: *crash - crashes: *crash - reports: "./reports" - unique_reports: "./unique_reports" - no_repro: "./no_repro" + + - type: "LibfuzzerCrashReport" + <<: *target_args + input_queue: *crashes check_fuzzer_help: true - type: "Coverage" @@ -35,4 +38,11 @@ tasks: - "{input}" input_queue: *inputs readonly_inputs: [*inputs] - coverage: "./coverage" + + # The analysis task is optional in the libfuzzer_basic template + # - type: Analysis + # <<: *target_args + # analysis: "REPLACE_ME" # The folder where you want the analysis results to be output + # analyzer_exe: "REPLACE_ME" + # analyzer_options: [] + # analyzer_env: {} diff --git a/src/agent/onefuzz-task/src/local/generic_analysis.rs b/src/agent/onefuzz-task/src/local/generic_analysis.rs index a1bc714d1a..cbb31a1ff9 100644 --- a/src/agent/onefuzz-task/src/local/generic_analysis.rs +++ b/src/agent/onefuzz-task/src/local/generic_analysis.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS, - CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR, - UNIQUE_REPORTS_DIR, - }, - tasks::{ - analysis::generic::{run as run_analysis, Config}, - config::CommonConfig, - }, -}; +use crate::tasks::config::CommonConfig; use anyhow::Result; use 
async_trait::async_trait; -use clap::{Arg, Command}; -use flume::Sender; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_analysis_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - - let analyzer_exe = args - .get_one::(ANALYZER_EXE) - .cloned() - .ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?; - - let analyzer_options = args - .get_many::(ANALYZER_OPTIONS) - .unwrap_or_default() - .map(|x| x.to_string()) - .collect(); - - let analyzer_env = get_hash_map(args, ANALYZER_ENV)?; - let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?; - let crashes = if input_queue.is_none() { - get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)? - } else { - None - }; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let config = Config { - analyzer_exe, - analyzer_options, - analyzer_env, - target_exe, - target_options, - input_queue, - crashes, - analysis, - tools: Some(tools), - reports, - unique_reports, - no_repro, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?; - run_analysis(config).await -} - -pub fn build_shared_args(required_task: bool) -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV) - .long(TARGET_ENV) - .requires(TARGET_EXE) - .num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .default_value("{input}") - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_OPTIONS) - .long(ANALYZER_OPTIONS) - .requires(ANALYZER_EXE) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(ANALYZER_ENV) - .long(ANALYZER_ENV) - .requires(ANALYZER_EXE) - .num_args(0..), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(ANALYZER_EXE) - .long(ANALYZER_EXE) - .requires(ANALYSIS_DIR) - .requires(CRASHES_DIR) - .required(required_task), - Arg::new(ANALYSIS_DIR) - .long(ANALYSIS_DIR) - .requires(ANALYZER_EXE) - .requires(CRASHES_DIR) - .required(required_task), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic analysis") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Analysis { analyzer_exe: String, @@ -146,7 +20,7 @@ pub struct Analysis { input_queue: Option, crashes: Option, analysis: PathBuf, - tools: PathBuf, + tools: Option, reports: 
Option, unique_reports: Option, no_repro: Option, @@ -191,9 +65,10 @@ impl Template for Analysis { .and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()), analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?, - tools: context - .to_monitored_sync_dir("tools", self.tools.clone()) - .ok(), + tools: self + .tools + .as_ref() + .and_then(|path| context.to_monitored_sync_dir("tools", path).ok()), reports: self .reports diff --git a/src/agent/onefuzz-task/src/local/generic_crash_report.rs b/src/agent/onefuzz-task/src/local/generic_crash_report.rs index dc2773b341..91dec1ae44 100644 --- a/src/agent/onefuzz-task/src/local/generic_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/generic_crash_report.rs @@ -3,150 +3,14 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, - TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::generic::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = Some(get_synced_dir( - CRASHES_DIR, - common.job_id, - common.task_id, - args, - )?) - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = Some(get_synced_dir( - UNIQUE_REPORTS_DIR, - common.job_id, - common.task_id, - args, - )?) 
- .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - check_queue, - crashes, - minimized_stack_depth: None, - input_queue, - no_repro, - reports, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generic crash report") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct CrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/generic_generator.rs b/src/agent/onefuzz-task/src/local/generic_generator.rs index 68490cd29d..3c26af4cf8 100644 --- a/src/agent/onefuzz-task/src/local/generic_generator.rs +++ b/src/agent/onefuzz-task/src/local/generic_generator.rs @@ -3,154 +3,14 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, - CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS, - READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - TOOLS_DIR, - }, - tasks::{ - config::CommonConfig, - fuzz::generator::{Config, GeneratorTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use 
flume::Sender; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; use super::template::{RunContext, Template}; -pub fn build_fuzz_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_options = get_cmd_arg(CmdType::Target, args); - let target_env = get_cmd_env(CmdType::Target, args)?; - - let generator_exe = get_cmd_exe(CmdType::Generator, args)?; - let generator_options = get_cmd_arg(CmdType::Generator, args); - let generator_env = get_cmd_env(CmdType::Generator, args)?; - let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - - let rename_output = args.get_flag(RENAME_OUTPUT); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let target_timeout = Some( - args.get_one::(TARGET_TIMEOUT) - .copied() - .expect("has a default"), - ); - - let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let ensemble_sync_delay = None; - - let config = Config { - generator_exe, - generator_env, - generator_options, - readonly_inputs, - crashes, - tools, - target_exe, - target_env, - target_options, - target_timeout, - check_asan_log, - check_debugger, - check_retry_count, - rename_output, - ensemble_sync_delay, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?; - GeneratorTask::new(config).run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(GENERATOR_EXE) - .long(GENERATOR_EXE) - .default_value("radamsa") - .required(true), - Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..), - Arg::new(GENERATOR_OPTIONS) - .long(GENERATOR_OPTIONS) - .value_delimiter(' ') - .default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}") - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .required(true) - .long(CRASHES_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(READONLY_INPUTS) - .required(true) - .num_args(1..) 
- .value_parser(value_parser!(PathBuf)) - .long(READONLY_INPUTS), - Arg::new(TOOLS_DIR) - .long(TOOLS_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(RENAME_OUTPUT) - .action(ArgAction::SetTrue) - .long(RENAME_OUTPUT), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .default_value("30"), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_DEBUGGER), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only generator fuzzing task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct Generator { generator_exe: String, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer.rs b/src/agent/onefuzz-task/src/local/libfuzzer.rs index 12abae88b0..472a6ae9e8 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer.rs @@ -1,168 +1,19 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#[cfg(any(target_os = "linux", target_os = "windows"))] -use crate::{ - local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args}, - tasks::coverage::generic::CoverageTask, -}; -use crate::{ - local::{ - common::{ - build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE, - REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR, - }, - generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args}, - libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args}, - libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args}, - libfuzzer_regression::{ - build_regression_config, build_shared_args as build_regression_args, - }, - }, - tasks::{ - analysis::generic::run as run_analysis, - config::CommonConfig, - fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, - regression::libfuzzer::LibFuzzerRegressionTask, - report::libfuzzer_report::ReportTask, - utils::default_bool_true, - }, +use crate::tasks::{ + config::CommonConfig, + fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask}, + utils::default_bool_true, }; use anyhow::Result; use async_trait::async_trait; -use clap::Command; -use flume::Sender; -use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles}; +use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, -}; -use tokio::task::spawn; -use uuid::Uuid; +use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? 
- .as_file_path() - .expect("invalid crash dir remote location"); - - let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?; - let mut task_handles = vec![]; - - let fuzz_task = spawn(async move { fuzzer.run().await }); - - wait_for_dir(&crash_dir).await?; - - task_handles.push(fuzz_task); - - if args.contains_id(UNIQUE_REPORTS_DIR) { - let crash_report_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut report = ReportTask::new(report_config); - let report_task = spawn(async move { report.managed_run().await }); - - task_handles.push(report_task); - task_handles.push(crash_report_input_monitor.handle); - } - - #[cfg(any(target_os = "linux", target_os = "windows"))] - if args.contains_id(COVERAGE_DIR) { - let coverage_input_monitor = - DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?; - let coverage_config = coverage::build_coverage_config( - args, - true, - Some(coverage_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - - let mut coverage = CoverageTask::new(coverage_config); - let coverage_task = spawn(async move { coverage.run().await }); - - task_handles.push(coverage_task); - task_handles.push(coverage_input_monitor.handle); - } - - if args.contains_id(ANALYZER_EXE) { - let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?; - let analysis_config = build_analysis_config( - args, - Some(analysis_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender.clone(), - )?; - let analysis_task = spawn(async move { run_analysis(analysis_config).await }); - - task_handles.push(analysis_task); - task_handles.push(analysis_input_monitor.handle); - } - - if args.contains_id(REGRESSION_REPORTS_DIR) { - let regression_config = build_regression_config( - args, - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let regression = LibFuzzerRegressionTask::new(regression_config); - let regression_task = spawn(async move { regression.run().await }); - task_handles.push(regression_task); - } - - try_wait_all_join_handles(task_handles).await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task"); - - let mut used = HashSet::new(); - - for args in &[ - build_fuzz_args(), - build_crash_args(), - build_analysis_args(false), - #[cfg(any(target_os = "linux", target_os = "windows"))] - build_coverage_args(true), - build_regression_args(false), - ] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibFuzzer { inputs: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs index be3326f749..9de1fc66ce 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_crash_report.rs @@ -3,139 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, 
get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR, - DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - report::libfuzzer_report::{Config, ReportTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; - -pub fn build_report_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default"); - - let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let crashes = if input_queue.is_none() { crashes } else { None }; - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_retry_count, - check_fuzzer_help, - minimized_stack_depth: None, - input_queue, - check_queue, - crashes, - reports, - no_repro, - unique_reports, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_report_config(args, None, context.common_config.clone(), event_sender)?; - ReportTask::new(config).managed_run().await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)) - .long(TARGET_TIMEOUT), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(DISABLE_CHECK_QUEUE) - .action(ArgAction::SetTrue) - .long(DISABLE_CHECK_QUEUE), - Arg::new(CHECK_FUZZER_HELP) - 
.action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerCrashReport { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs index 1e128f0dfc..d4915e6b4c 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_merge.rs @@ -3,97 +3,15 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, - get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS, - ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - }, - tasks::{ - config::CommonConfig, - merge::libfuzzer_merge::{spawn, Config}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use futures::future::OptionFuture; use onefuzz::syncdir::SyncedDir; use schemars::JsonSchema; -use storage_queue::QueueClient; use super::template::{RunContext, Template}; -pub fn build_merge_config( - args: &clap::ArgMatches, - input_queue: Option, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)? - .into_iter() - .map(|sd| sd.monitor_count(&event_sender)) - .collect::>>()?; - let unique_inputs = - get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let preserve_existing_outputs = args - .get_one::(PRESERVE_EXISTING_OUTPUTS) - .copied() - .unwrap_or_default(); - - let config = Config { - target_exe, - target_env, - target_options, - input_queue, - inputs, - unique_inputs, - preserve_existing_outputs, - check_fuzzer_help, - common, - }; - - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?; - spawn(config).await -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(INPUTS_DIR) - .long(INPUTS_DIR) - .value_parser(value_parser!(PathBuf)) - .num_args(0..), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer crash report task") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerMerge { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs index b8a5766e10..b53fb84c22 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_regression.rs @@ -3,145 +3,13 @@ use std::{collections::HashMap, path::PathBuf}; -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType, - SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR, - CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, - TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR, - }, - tasks::{ - config::CommonConfig, - regression::libfuzzer::{Config, LibFuzzerRegressionTask}, - utils::default_bool_true, - }, -}; +use crate::tasks::{config::CommonConfig, utils::default_bool_true}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use schemars::JsonSchema; use super::template::{RunContext, Template}; -const REPORT_NAMES: &str = "report_names"; - -pub fn build_regression_config( - args: &clap::ArgMatches, - common: CommonConfig, - event_sender: Option>, -) -> Result { - let target_exe = get_cmd_exe(CmdType::Target, args)?.into(); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)? - .monitor_count(&event_sender)?; - let regression_reports = - get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)? 
- .monitor_count(&event_sender)?; - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args) - .ok() - .monitor_count(&event_sender)?; - - let report_list: Option> = args - .get_many::(REPORT_NAMES) - .map(|x| x.cloned().collect()); - - let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP); - - let config = Config { - target_exe, - target_env, - target_options, - target_timeout, - check_fuzzer_help, - check_retry_count, - crashes, - regression_reports, - reports, - no_repro, - unique_reports, - readonly_inputs: None, - report_list, - minimized_stack_depth: None, - common, - }; - Ok(config) -} - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let config = build_regression_config(args, context.common_config.clone(), event_sender)?; - LibFuzzerRegressionTask::new(config).run().await -} - -pub fn build_shared_args(local_job: bool) -> Vec { - let mut args = vec![ - Arg::new(TARGET_EXE).long(TARGET_EXE).required(true), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(COVERAGE_DIR) - .required(!local_job) - .long(COVERAGE_DIR) - .value_parser(value_parser!(PathBuf)), - Arg::new(CHECK_FUZZER_HELP) - .action(ArgAction::SetTrue) - .long(CHECK_FUZZER_HELP), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CRASHES_DIR) - .long(CRASHES_DIR) - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(REGRESSION_REPORTS_DIR) - .long(REGRESSION_REPORTS_DIR) - .required(local_job) - .value_parser(value_parser!(PathBuf)), - Arg::new(REPORTS_DIR) - .long(REPORTS_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(NO_REPRO_DIR) - .long(NO_REPRO_DIR) - .required(false) - .value_parser(value_parser!(PathBuf)), - Arg::new(UNIQUE_REPORTS_DIR) - .long(UNIQUE_REPORTS_DIR) - .value_parser(value_parser!(PathBuf)) - .required(true), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ]; - if local_job { - args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..)) - } - args -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("execute a local-only libfuzzer regression task") - .args(&build_shared_args(true)) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerRegression { target_exe: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs index 30f9c446c8..88c3cd1a3d 100644 --- a/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs +++ b/src/agent/onefuzz-task/src/local/libfuzzer_test_input.rs @@ -1,97 +1,14 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT, - TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, - }, - tasks::report::libfuzzer_report::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("marked as required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("marked as required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has a default value"); - - let extra_setup_dir = context.common_config.extra_setup_dir.as_deref(); - let extra_output_dir = context - .common_config - .extra_output - .as_ref() - .map(|x| x.local_path.as_path()); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir, - extra_output_dir, - minimized_stack_depth: None, - machine_identity: context.common_config.machine_identity, - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - .about("test a libfuzzer application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct LibfuzzerTestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/local/mod.rs b/src/agent/onefuzz-task/src/local/mod.rs index 9ea530f575..6020cb0fa6 100644 --- a/src/agent/onefuzz-task/src/local/mod.rs +++ b/src/agent/onefuzz-task/src/local/mod.rs @@ -15,7 +15,6 @@ pub mod libfuzzer_fuzz; pub mod libfuzzer_merge; pub mod libfuzzer_regression; pub mod libfuzzer_test_input; -pub mod radamsa; pub mod template; pub mod test_input; pub mod tui; diff --git a/src/agent/onefuzz-task/src/local/radamsa.rs b/src/agent/onefuzz-task/src/local/radamsa.rs deleted file mode 100644 index 4d84de027a..0000000000 --- a/src/agent/onefuzz-task/src/local/radamsa.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -use crate::{ - local::{ - common::{build_local_context, DirectoryMonitorQueue, UiEvent}, - generic_crash_report::{build_report_config, build_shared_args as build_crash_args}, - generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args}, - }, - tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask}, -}; -use anyhow::{Context, Result}; -use clap::Command; -use flume::Sender; -use onefuzz::utils::try_wait_all_join_handles; -use std::collections::HashSet; -use tokio::task::spawn; -use uuid::Uuid; - -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, true, event_sender.clone()).await?; - let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?; - let crash_dir = fuzz_config - .crashes - .remote_url()? - .as_file_path() - .ok_or_else(|| format_err!("invalid crash directory"))?; - - tokio::fs::create_dir_all(&crash_dir) - .await - .with_context(|| { - format!( - "unable to create crashes directory: {}", - crash_dir.display() - ) - })?; - - let fuzzer = GeneratorTask::new(fuzz_config); - let fuzz_task = spawn(async move { fuzzer.run().await }); - - let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir) - .await - .context("directory monitor failed")?; - let report_config = build_report_config( - args, - Some(crash_report_input_monitor.queue_client), - CommonConfig { - task_id: Uuid::new_v4(), - ..context.common_config.clone() - }, - event_sender, - )?; - let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await }); - - try_wait_all_join_handles(vec![ - fuzz_task, - report_task, - crash_report_input_monitor.handle, - ]) - .await?; - - Ok(()) -} - -pub fn args(name: &'static str) -> Command { - let mut app = Command::new(name).about("run a local generator & crash reporting job"); - - let mut used = HashSet::new(); - for args in &[build_fuzz_args(), build_crash_args()] { - for arg in args { - if used.insert(arg.get_id()) { - app = app.arg(arg); - } - } - } - - app -} diff --git a/src/agent/onefuzz-task/src/local/schema.json b/src/agent/onefuzz-task/src/local/schema.json index 0a1f128e67..e5b00f6e17 100644 --- a/src/agent/onefuzz-task/src/local/schema.json +++ b/src/agent/onefuzz-task/src/local/schema.json @@ -126,7 +126,6 @@ "analyzer_options", "target_exe", "target_options", - "tools", "type" ], "properties": { @@ -182,7 +181,10 @@ } }, "tools": { - "type": "string" + "type": [ + "string", + "null" + ] }, "type": { "type": "string", @@ -893,4 +895,4 @@ ] } } -} +} \ No newline at end of file diff --git a/src/agent/onefuzz-task/src/local/template.rs b/src/agent/onefuzz-task/src/local/template.rs index adcca9bfa3..3393edd89a 100644 --- a/src/agent/onefuzz-task/src/local/template.rs +++ b/src/agent/onefuzz-task/src/local/template.rs @@ -199,6 +199,7 @@ pub async fn launch( job_id: Uuid::new_v4(), instance_id: Uuid::new_v4(), heartbeat_queue: None, + job_result_queue: None, instance_telemetry_key: None, microsoft_telemetry_key: None, logs: None, @@ -244,12 +245,10 @@ mod test { .expect("Couldn't find checked-in schema.json") .replace("\r\n", "\n"); - println!("{}", schema_str); - - assert_eq!( - schema_str.replace('\n', ""), - checked_in_schema.replace('\n', ""), - "The checked-in local fuzzing schema did not match the generated schema." 
- ); + if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") { + std::fs::write("src/local/new.schema.json", schema_str) + .expect("The schemas did not match but failed to write new schema to file."); + panic!("The checked-in local fuzzing schema did not match the generated schema. The generated schema can be found at src/local/new.schema.json"); + } } } diff --git a/src/agent/onefuzz-task/src/local/test_input.rs b/src/agent/onefuzz-task/src/local/test_input.rs index 6e59fb3ff5..0018494ec0 100644 --- a/src/agent/onefuzz-task/src/local/test_input.rs +++ b/src/agent/onefuzz-task/src/local/test_input.rs @@ -1,18 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::{ - local::common::{ - build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG, - CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, - TARGET_TIMEOUT, - }, - tasks::report::generic::{test_input, TestInputArgs}, -}; use anyhow::Result; use async_trait::async_trait; -use clap::{Arg, ArgAction, Command}; -use flume::Sender; use onefuzz::machine_id::MachineIdentity; use schemars::JsonSchema; use std::{collections::HashMap, path::PathBuf}; @@ -20,82 +10,6 @@ use uuid::Uuid; use super::template::{RunContext, Template}; -pub async fn run(args: &clap::ArgMatches, event_sender: Option>) -> Result<()> { - let context = build_local_context(args, false, event_sender).await?; - - let target_exe = args - .get_one::(TARGET_EXE) - .expect("is marked required"); - let target_env = get_cmd_env(CmdType::Target, args)?; - let target_options = get_cmd_arg(CmdType::Target, args); - let input = args - .get_one::("input") - .expect("is marked required"); - let target_timeout = args.get_one::(TARGET_TIMEOUT).copied(); - let check_retry_count = args - .get_one::(CHECK_RETRY_COUNT) - .copied() - .expect("has default value"); - let check_asan_log = args.get_flag(CHECK_ASAN_LOG); - let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER); - - let config = TestInputArgs { - target_exe: target_exe.as_path(), - target_env: &target_env, - target_options: &target_options, - input_url: None, - input: input.as_path(), - job_id: context.common_config.job_id, - task_id: context.common_config.task_id, - target_timeout, - check_retry_count, - setup_dir: &context.common_config.setup_dir, - extra_setup_dir: context.common_config.extra_setup_dir.as_deref(), - minimized_stack_depth: None, - check_asan_log, - check_debugger, - machine_identity: context.common_config.machine_identity.clone(), - }; - - let result = test_input(config).await?; - println!("{}", serde_json::to_string_pretty(&result)?); - Ok(()) -} - -pub fn build_shared_args() -> Vec { - vec![ - Arg::new(TARGET_EXE).required(true), - Arg::new("input") - .required(true) - .value_parser(value_parser!(PathBuf)), - Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..), - Arg::new(TARGET_OPTIONS) - .default_value("{input}") - .long(TARGET_OPTIONS) - .value_delimiter(' ') - .help("Use a quoted string with space separation to denote multiple arguments"), - Arg::new(TARGET_TIMEOUT) - .long(TARGET_TIMEOUT) - .value_parser(value_parser!(u64)), - Arg::new(CHECK_RETRY_COUNT) - .long(CHECK_RETRY_COUNT) - .value_parser(value_parser!(u64)) - .default_value("0"), - Arg::new(CHECK_ASAN_LOG) - .action(ArgAction::SetTrue) - .long(CHECK_ASAN_LOG), - Arg::new(DISABLE_CHECK_DEBUGGER) - .action(ArgAction::SetTrue) - .long("disable_check_debugger"), - ] -} - -pub fn args(name: &'static str) -> Command { - Command::new(name) - 
.about("test an application with a specific input") - .args(&build_shared_args()) -} - #[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)] pub struct TestInput { input: PathBuf, diff --git a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs index 3ba068a614..05c6c3d169 100644 --- a/src/agent/onefuzz-task/src/tasks/analysis/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/analysis/generic.rs @@ -65,6 +65,8 @@ pub async fn run(config: Config) -> Result<()> { tools.init_pull().await?; } + let job_result_client = config.common.init_job_result().await?; + // the tempdir is always created, however, the reports_path and // reports_monitor_future are only created if we have one of the three // report SyncedDir. The idea is that the option for where to write reports @@ -88,6 +90,7 @@ pub async fn run(config: Config) -> Result<()> { &config.unique_reports, &config.reports, &config.no_repro, + &job_result_client, ); ( Some(reports_dir.path().to_path_buf()), @@ -171,7 +174,7 @@ async fn poll_inputs( } message.delete().await?; } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; } } diff --git a/src/agent/onefuzz-task/src/tasks/config.rs b/src/agent/onefuzz-task/src/tasks/config.rs index 0848379d73..e29e0fd60d 100644 --- a/src/agent/onefuzz-task/src/tasks/config.rs +++ b/src/agent/onefuzz-task/src/tasks/config.rs @@ -14,6 +14,7 @@ use onefuzz::{ machine_id::MachineIdentity, syncdir::{SyncOperation, SyncedDir}, }; +use onefuzz_result::job_result::{init_job_result, TaskJobResultClient}; use onefuzz_telemetry::{ self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey, Role, @@ -50,6 +51,8 @@ pub struct CommonConfig { pub heartbeat_queue: Option, + pub job_result_queue: Option, + pub instance_telemetry_key: Option, pub microsoft_telemetry_key: Option, @@ -103,6 +106,23 @@ impl CommonConfig { None => Ok(None), } } + + pub async fn init_job_result(&self) -> Result> { + match &self.job_result_queue { + Some(url) => { + let result = init_job_result( + url.clone(), + self.task_id, + self.job_id, + self.machine_identity.machine_id, + self.machine_identity.machine_name.clone(), + ) + .await?; + Ok(Some(result)) + } + None => Ok(None), + } + } } #[derive(Debug, Deserialize)] diff --git a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs index 0b19f03122..704188293b 100644 --- a/src/agent/onefuzz-task/src/tasks/coverage/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/coverage/generic.rs @@ -26,6 +26,8 @@ use onefuzz_file_format::coverage::{ binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson}, source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson}, }; +use onefuzz_result::job_result::JobResultData; +use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData}; use storage_queue::{Message, QueueClient}; use tokio::fs; @@ -114,7 +116,7 @@ impl CoverageTask { let allowlist = self.load_target_allowlist().await?; let heartbeat = self.config.common.init_heartbeat(None).await?; - + let job_result = self.config.common.init_job_result().await?; let mut seen_inputs = false; let target_exe_path = @@ -129,6 +131,7 @@ impl CoverageTask { coverage, allowlist, heartbeat, + job_result, target_exe.to_string(), )?; @@ 
-223,6 +226,7 @@ struct TaskContext<'a> { module_allowlist: AllowList, source_allowlist: Arc, heartbeat: Option, + job_result: Option, cache: Arc, } @@ -232,6 +236,7 @@ impl<'a> TaskContext<'a> { coverage: BinaryCoverage, allowlist: TargetAllowList, heartbeat: Option, + job_result: Option, target_exe: String, ) -> Result { let cache = DebugInfoCache::new(allowlist.source_files.clone()); @@ -251,6 +256,7 @@ impl<'a> TaskContext<'a> { module_allowlist: allowlist.modules, source_allowlist: Arc::new(allowlist.source_files), heartbeat, + job_result, cache: Arc::new(cache), }) } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs index d9116a1ed2..bd7511cac2 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/generator.rs @@ -73,6 +73,7 @@ impl GeneratorTask { } let hb_client = self.config.common.init_heartbeat(None).await?; + let jr_client = self.config.common.init_job_result().await?; for dir in &self.config.readonly_inputs { dir.init_pull().await?; @@ -84,7 +85,10 @@ impl GeneratorTask { self.config.ensemble_sync_delay, ); - let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false); + let crash_dir_monitor = self + .config + .crashes + .monitor_results(new_result, false, &jr_client); let fuzzer = self.fuzzing_loop(hb_client); @@ -298,6 +302,7 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), + job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs index 3336ed4d7a..32f3372958 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/libfuzzer/common.rs @@ -1,7 +1,11 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true}; +use crate::tasks::{ + config::CommonConfig, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::default_bool_true, +}; use anyhow::{Context, Result}; use arraydeque::{ArrayDeque, Wrapping}; use async_trait::async_trait; @@ -12,6 +16,7 @@ use onefuzz::{ process::ExitStatus, syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir}, }; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{new_coverage, new_crashdump, new_result, runtime_stats}, EventData, @@ -126,21 +131,31 @@ where self.verify().await?; let hb_client = self.config.common.init_heartbeat(None).await?; + let jr_client = self.config.common.init_job_result().await?; // To be scheduled. 
let resync = self.continuous_sync_inputs(); - let new_inputs = self.config.inputs.monitor_results(new_coverage, true); - let new_crashes = self.config.crashes.monitor_results(new_result, true); + + let new_inputs = self + .config + .inputs + .monitor_results(new_coverage, true, &jr_client); + let new_crashes = self + .config + .crashes + .monitor_results(new_result, true, &jr_client); let new_crashdumps = async { if let Some(crashdumps) = &self.config.crashdumps { - crashdumps.monitor_results(new_crashdump, true).await + crashdumps + .monitor_results(new_crashdump, true, &jr_client) + .await } else { Ok(()) } }; let (stats_sender, stats_receiver) = mpsc::unbounded_channel(); - let report_stats = report_runtime_stats(stats_receiver, hb_client); + let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client); let fuzzers = self.run_fuzzers(Some(&stats_sender)); futures::try_join!( resync, @@ -183,7 +198,7 @@ where .inputs .local_path .parent() - .ok_or_else(|| anyhow!("Invalid input path"))?; + .ok_or_else(|| anyhow!("invalid input path"))?; let temp_path = task_dir.join(".temp"); tokio::fs::create_dir_all(&temp_path).await?; let temp_dir = tempdir_in(temp_path)?; @@ -501,7 +516,7 @@ impl TotalStats { self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum(); } - fn report(&self) { + async fn report(&self, jr_client: &Option) { event!( runtime_stats; EventData::Count = self.count, @@ -513,6 +528,17 @@ impl TotalStats { EventData::Count = self.count, EventData::ExecsSecond = self.execs_sec ); + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::RuntimeStats, + HashMap::from([ + ("total_count".to_string(), self.count as f64), + ("execs_sec".to_string(), self.execs_sec), + ]), + ) + .await; + } } } @@ -542,7 +568,8 @@ impl Timer { // are approximating nearest-neighbor interpolation on the runtime stats time series. async fn report_runtime_stats( mut stats_channel: mpsc::UnboundedReceiver, - heartbeat_client: impl HeartbeatSender, + heartbeat_client: &Option, + jr_client: &Option, ) -> Result<()> { // Cache the last-reported stats for a given worker. 
// @@ -551,7 +578,7 @@ async fn report_runtime_stats( let mut total = TotalStats::default(); // report all zeros to start - total.report(); + total.report(jr_client).await; let timer = Timer::new(RUNTIME_STATS_PERIOD); @@ -560,10 +587,10 @@ async fn report_runtime_stats( Some(stats) = stats_channel.recv() => { heartbeat_client.alive(); total.update(stats); - total.report() + total.report(jr_client).await } _ = timer.wait() => { - total.report() + total.report(jr_client).await } } } diff --git a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs index de1e1106ba..3f00e20b8d 100644 --- a/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs +++ b/src/agent/onefuzz-task/src/tasks/fuzz/supervisor.rs @@ -79,7 +79,10 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { remote_path: config.crashes.remote_path.clone(), }; crashes.init().await?; - let monitor_crashes = crashes.monitor_results(new_result, false); + + let jr_client = config.common.init_job_result().await?; + + let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client); // setup crashdumps let (crashdump_dir, monitor_crashdumps) = { @@ -95,9 +98,12 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { }; let monitor_dir = crashdump_dir.clone(); + let monitor_jr_client = config.common.init_job_result().await?; let monitor_crashdumps = async move { if let Some(crashdumps) = monitor_dir { - crashdumps.monitor_results(new_crashdump, false).await + crashdumps + .monitor_results(new_crashdump, false, &monitor_jr_client) + .await } else { Ok(()) } @@ -129,11 +135,13 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { if let Some(no_repro) = &config.no_repro { no_repro.init().await?; } + let monitor_reports_future = monitor_reports( reports_dir.path(), &config.unique_reports, &config.reports, &config.no_repro, + &jr_client, ); let inputs = SyncedDir { @@ -156,7 +164,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> { delay_with_jitter(delay).await; } } - let monitor_inputs = inputs.monitor_results(new_coverage, false); + let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client); let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled let inputs_sync_task = inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation); @@ -444,6 +452,7 @@ mod tests { task_id: Default::default(), instance_id: Default::default(), heartbeat_queue: Default::default(), + job_result_queue: Default::default(), instance_telemetry_key: Default::default(), microsoft_telemetry_key: Default::default(), logs: Default::default(), diff --git a/src/agent/onefuzz-task/src/tasks/heartbeat.rs b/src/agent/onefuzz-task/src/tasks/heartbeat.rs index 515fa39d0c..e13b661909 100644 --- a/src/agent/onefuzz-task/src/tasks/heartbeat.rs +++ b/src/agent/onefuzz-task/src/tasks/heartbeat.rs @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-use crate::onefuzz::heartbeat::HeartbeatClient; use anyhow::Result; +use onefuzz::heartbeat::HeartbeatClient; use reqwest::Url; use serde::{self, Deserialize, Serialize}; use std::time::Duration; diff --git a/src/agent/onefuzz-task/src/tasks/merge/generic.rs b/src/agent/onefuzz-task/src/tasks/merge/generic.rs index 4f2e8234a8..3b6a2094d8 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/generic.rs @@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> { } } } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; }; } diff --git a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs index 1c334b3f18..2d53bc8c07 100644 --- a/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs +++ b/src/agent/onefuzz-task/src/tasks/merge/libfuzzer_merge.rs @@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<() } Ok(()) } else { - warn!("no new candidate inputs found, sleeping"); + debug!("no new candidate inputs found, sleeping"); delay_with_jitter(EMPTY_QUEUE_DELAY).await; Ok(()) } diff --git a/src/agent/onefuzz-task/src/tasks/regression/common.rs b/src/agent/onefuzz-task/src/tasks/regression/common.rs index 60023cfa6e..b61a97df4c 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/common.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/common.rs @@ -2,12 +2,14 @@ // Licensed under the MIT License. use crate::tasks::{ + config::CommonConfig, heartbeat::{HeartbeatSender, TaskHeartbeatClient}, report::crash_report::{parse_report_file, CrashTestResult, RegressionReport}, }; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::syncdir::SyncedDir; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use std::path::PathBuf; @@ -24,7 +26,7 @@ pub trait RegressionHandler { /// Runs the regression task pub async fn run( - heartbeat_client: Option, + common_config: &CommonConfig, regression_reports: &SyncedDir, crashes: &SyncedDir, report_dirs: &[&SyncedDir], @@ -35,6 +37,9 @@ pub async fn run( info!("starting regression task"); regression_reports.init().await?; + let heartbeat_client = common_config.init_heartbeat(None).await?; + let job_result_client = common_config.init_job_result().await?; + handle_crash_reports( handler, crashes, @@ -42,6 +47,7 @@ pub async fn run( report_list, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling crash reports")?; @@ -52,6 +58,7 @@ pub async fn run( readonly_inputs, regression_reports, &heartbeat_client, + &job_result_client, ) .await .context("handling inputs")?; @@ -71,6 +78,7 @@ pub async fn handle_inputs( readonly_inputs: &SyncedDir, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { readonly_inputs.init_pull().await?; let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?; @@ -95,7 +103,7 @@ pub async fn handle_inputs( crash_test_result, original_crash_test_result: None, } - .save(None, regression_reports) + .save(None, regression_reports, job_result_client) .await? 
} @@ -109,6 +117,7 @@ pub async fn handle_crash_reports( report_list: &Option>, regression_reports: &SyncedDir, heartbeat_client: &Option, + job_result_client: &Option, ) -> Result<()> { // without crash report containers, skip this method if report_dirs.is_empty() { @@ -158,7 +167,7 @@ pub async fn handle_crash_reports( crash_test_result, original_crash_test_result: Some(original_crash_test_result), } - .save(Some(file_name), regression_reports) + .save(Some(file_name), regression_reports, job_result_client) .await? } } diff --git a/src/agent/onefuzz-task/src/tasks/regression/generic.rs b/src/agent/onefuzz-task/src/tasks/regression/generic.rs index 640e80db9a..8570208d59 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/generic.rs @@ -89,7 +89,6 @@ impl GenericRegressionTask { pub async fn run(&self) -> Result<()> { info!("Starting generic regression task"); - let heartbeat_client = self.config.common.init_heartbeat(None).await?; let mut report_dirs = vec![]; for dir in vec![ @@ -103,7 +102,7 @@ impl GenericRegressionTask { report_dirs.push(dir); } common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs index 06dd7c00d9..e65f46bb64 100644 --- a/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs +++ b/src/agent/onefuzz-task/src/tasks/regression/libfuzzer.rs @@ -103,9 +103,8 @@ impl LibFuzzerRegressionTask { report_dirs.push(dir); } - let heartbeat_client = self.config.common.init_heartbeat(None).await?; common::run( - heartbeat_client, + &self.config.common, &self.config.regression_reports, &self.config.crashes, &report_dirs, diff --git a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs index 23171bc432..290b98ccde 100644 --- a/src/agent/onefuzz-task/src/tasks/report/crash_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/crash_report.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Result}; use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir}; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{ Event::{ new_report, new_unable_to_reproduce, new_unique_report, regression_report, @@ -12,6 +13,7 @@ use onefuzz_telemetry::{ }; use serde::{Deserialize, Serialize}; use stacktrace_parser::CrashLog; +use std::collections::HashMap; use std::path::{Path, PathBuf}; use uuid::Uuid; @@ -111,6 +113,7 @@ impl RegressionReport { self, report_name: Option, regression_reports: &SyncedDir, + jr_client: &Option, ) -> Result<()> { let (event, name) = match &self.crash_test_result { CrashTestResult::CrashReport(report) => { @@ -126,6 +129,15 @@ impl RegressionReport { if upload_or_save_local(&self, &name, regression_reports).await? 
{ event!(event; EventData::Path = name.clone()); metric!(event; 1.0; EventData::Path = name.clone()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewRegressionReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } Ok(()) } @@ -149,6 +161,7 @@ impl CrashTestResult { unique_reports: &Option, reports: &Option, no_repro: &Option, + jr_client: &Option, ) -> Result<()> { match self { Self::CrashReport(report) => { @@ -158,6 +171,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, unique_reports).await? { event!(new_unique_report; EventData::Path = report.unique_blob_name()); metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewUniqueReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } @@ -166,6 +188,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, reports).await? { event!(new_report; EventData::Path = report.blob_name()); metric!(new_report; 1.0; EventData::Path = report.blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NewReport, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } } @@ -176,6 +207,15 @@ impl CrashTestResult { if upload_or_save_local(&report, &name, no_repro).await? { event!(new_unable_to_reproduce; EventData::Path = report.blob_name()); metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name()); + + if let Some(jr_client) = jr_client { + let _ = jr_client + .send_direct( + JobResultData::NoReproCrashingInput, + HashMap::from([("count".to_string(), 1.0)]), + ) + .await; + } } } } @@ -324,6 +364,7 @@ pub async fn monitor_reports( unique_reports: &Option, reports: &Option, no_crash: &Option, + jr_client: &Option, ) -> Result<()> { if unique_reports.is_none() && reports.is_none() && no_crash.is_none() { debug!("no report directories configured"); @@ -334,7 +375,9 @@ pub async fn monitor_reports( while let Some(file) = monitor.next_file().await? 
{ let result = parse_report_file(file).await?; - result.save(unique_reports, reports, no_crash).await?; + result + .save(unique_reports, reports, no_crash, jr_client) + .await?; } Ok(()) diff --git a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs index 9b626a7d89..b8659845de 100644 --- a/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/dotnet/generic.rs @@ -8,25 +8,25 @@ use std::{ sync::Arc, }; +use crate::tasks::report::crash_report::*; +use crate::tasks::report::dotnet::common::collect_exception_info; +use crate::tasks::{ + config::CommonConfig, + generic::input_poller::*, + heartbeat::{HeartbeatSender, TaskHeartbeatClient}, + utils::{default_bool_true, try_resolve_setup_relative_path}, +}; use anyhow::{Context, Result}; use async_trait::async_trait; use onefuzz::expand::Expand; use onefuzz::fs::set_executable; use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir}; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use storage_queue::{Message, QueueClient}; use tokio::fs; -use crate::tasks::report::crash_report::*; -use crate::tasks::report::dotnet::common::collect_exception_info; -use crate::tasks::{ - config::CommonConfig, - generic::input_poller::*, - heartbeat::{HeartbeatSender, TaskHeartbeatClient}, - utils::{default_bool_true, try_resolve_setup_relative_path}, -}; - const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump"; #[derive(Debug, Deserialize)] @@ -114,15 +114,18 @@ impl DotnetCrashReportTask { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -260,6 +263,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await; diff --git a/src/agent/onefuzz-task/src/tasks/report/generic.rs b/src/agent/onefuzz-task/src/tasks/report/generic.rs index 9088f98acc..8ad259f0a5 100644 --- a/src/agent/onefuzz-task/src/tasks/report/generic.rs +++ b/src/agent/onefuzz-task/src/tasks/report/generic.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -73,7 +74,9 @@ impl ReportTask { pub async fn managed_run(&mut self) -> Result<()> { info!("Starting generic crash report task"); let heartbeat_client = self.config.common.init_heartbeat(None).await?; - let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client); + let job_result_client = self.config.common.init_job_result().await?; + let mut processor = + GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client); #[allow(clippy::manual_flatten)] for entry in [ @@ -183,13 +186,19 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct GenericReportProcessor<'a> { config: &'a Config, heartbeat_client: Option, + job_result_client: Option, } impl<'a> GenericReportProcessor<'a> { - pub fn new(config: &'a Config, heartbeat_client: Option) -> Self { + pub fn new( + config: &'a Config, + heartbeat_client: Option, + 
job_result_client: Option, + ) -> Self { Self { config, heartbeat_client, + job_result_client, } } @@ -239,6 +248,7 @@ impl<'a> Processor for GenericReportProcessor<'a> { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await .context("saving report failed") diff --git a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs index f18f638fa3..587ed2e3dc 100644 --- a/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs +++ b/src/agent/onefuzz-task/src/tasks/report/libfuzzer_report.rs @@ -13,6 +13,7 @@ use async_trait::async_trait; use onefuzz::{ blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir, }; +use onefuzz_result::job_result::TaskJobResultClient; use reqwest::Url; use serde::Deserialize; use std::{ @@ -196,15 +197,18 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result { pub struct AsanProcessor { config: Arc, heartbeat_client: Option, + job_result_client: Option, } impl AsanProcessor { pub async fn new(config: Arc) -> Result { let heartbeat_client = config.common.init_heartbeat(None).await?; + let job_result_client = config.common.init_job_result().await?; Ok(Self { config, heartbeat_client, + job_result_client, }) } @@ -257,6 +261,7 @@ impl Processor for AsanProcessor { &self.config.unique_reports, &self.config.reports, &self.config.no_repro, + &self.job_result_client, ) .await } diff --git a/src/agent/onefuzz/Cargo.toml b/src/agent/onefuzz/Cargo.toml index f834c7b6dc..55042607fa 100644 --- a/src/agent/onefuzz/Cargo.toml +++ b/src/agent/onefuzz/Cargo.toml @@ -44,6 +44,7 @@ tempfile = "3.8.0" process_control = "4.0" reqwest-retry = { path = "../reqwest-retry" } onefuzz-telemetry = { path = "../onefuzz-telemetry" } +onefuzz-result = { path = "../onefuzz-result" } stacktrace-parser = { path = "../stacktrace-parser" } backoff = { version = "0.4", features = ["tokio"] } diff --git a/src/agent/onefuzz/src/blob/url.rs b/src/agent/onefuzz/src/blob/url.rs index f55ffbb23a..134b59dea0 100644 --- a/src/agent/onefuzz/src/blob/url.rs +++ b/src/agent/onefuzz/src/blob/url.rs @@ -192,10 +192,15 @@ impl BlobContainerUrl { } pub fn as_path(&self, prefix: impl AsRef) -> Result { - let dir = self - .account() - .ok_or_else(|| anyhow!("Invalid container Url"))?; - Ok(prefix.as_ref().join(dir)) + match (self.account(), self.container()) { + (Some(account), Some(container)) => { + let mut path = PathBuf::new(); + path.push(account); + path.push(container); + Ok(prefix.as_ref().join(path)) + } + _ => bail!("Invalid container Url"), + } } } @@ -526,4 +531,14 @@ mod tests { "id:000000,sig:06,src:000000,op:havoc,rep:128" ); } + + #[test] + fn test_as_path() -> Result<()> { + let root = PathBuf::from(r"/onefuzz"); + let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?; + let path = url.as_path(root)?; + assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path); + + Ok(()) + } } diff --git a/src/agent/onefuzz/src/syncdir.rs b/src/agent/onefuzz/src/syncdir.rs index 6ff4d4fc68..e170901bdc 100644 --- a/src/agent/onefuzz/src/syncdir.rs +++ b/src/agent/onefuzz/src/syncdir.rs @@ -11,10 +11,12 @@ use crate::{ }; use anyhow::{Context, Result}; use dunce::canonicalize; +use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient}; use onefuzz_telemetry::{Event, EventData}; use reqwest::{StatusCode, Url}; use reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, 
MAX_RETRY_ATTEMPTS};
 use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
 use std::{env::current_dir, path::PathBuf, str, time::Duration};
 use tokio::{fs, select};
 use tokio_util::sync::CancellationToken;
@@ -241,6 +243,7 @@ impl SyncedDir {
         url: BlobContainerUrl,
         event: Event,
         ignore_dotfiles: bool,
+        jr_client: &Option<TaskJobResultClient>,
     ) -> Result<()> {

         debug!("monitoring {}", path.display());
@@ -265,7 +268,6 @@ impl SyncedDir {
             if ignore_dotfiles && file_name_event_str.starts_with('.') {
                 continue;
             }
-            event!(event.clone(); EventData::Path = file_name_event_str);
             metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
             if let Some(jr_client) = jr_client {
@@ -281,7 +283,11 @@ impl SyncedDir {
                     Event::new_coverage => {
                         jr_client
                             .send_direct(
                                 JobResultData::NewCoverage,
                                 HashMap::from([("count".to_string(), 1.0)]),
                             )
                             .await;
@@ -337,6 +343,9 @@ impl SyncedDir {
                 event!(event.clone(); EventData::Path = file_name_event_str);
                 metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
                 if let Some(jr_client) = jr_client {
                     match event {
                         Event::new_result => {
@@ -350,7 +359,11 @@ impl SyncedDir {
                         Event::new_coverage => {
                             jr_client
                                 .send_direct(
                                     JobResultData::NewCoverage,
                                     HashMap::from([("count".to_string(), 1.0)]),
                                 )
                                 .await;
@@ -360,8 +373,11 @@ impl SyncedDir {
                         }
                     }
                 }
                 if let Err(err) = uploader.upload(item.clone()).await {
                     let error_message = format!(
                         "Couldn't upload file. path:{} dir:{} err:{:?}",
@@ -393,7 +409,12 @@ impl SyncedDir {
     /// The intent of this is to support use cases where we usually want a directory
     /// to be initialized, but a user-supplied binary, (such as AFL) logically owns
     /// a directory, and may reset it.
- pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> { + pub async fn monitor_results( + &self, + event: Event, + ignore_dotfiles: bool, + job_result_client: &Option, + ) -> Result<()> { if let Some(url) = self.remote_path.clone() { loop { debug!("waiting to monitor {}", self.local_path.display()); @@ -412,6 +433,7 @@ impl SyncedDir { url.clone(), event.clone(), ignore_dotfiles, + job_result_client, ) .await?; } diff --git a/src/deployment/bicep-templates/storageAccounts.bicep b/src/deployment/bicep-templates/storageAccounts.bicep index 6a96cea6a0..27f2da21d8 100644 --- a/src/deployment/bicep-templates/storageAccounts.bicep +++ b/src/deployment/bicep-templates/storageAccounts.bicep @@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [ 'update-queue' 'webhooks' 'signalr-events' - 'custom-metrics' + 'job-result' ] var fileChangesQueueIndex = 0 diff --git a/src/integration-tests/integration-test.py b/src/integration-tests/integration-test.py index 6c642b1ca7..1c7c3a1df9 100755 --- a/src/integration-tests/integration-test.py +++ b/src/integration-tests/integration-test.py @@ -88,6 +88,7 @@ class Integration(BaseModel): target_method: Optional[str] setup_dir: Optional[str] target_env: Optional[Dict[str, str]] + pool: PoolName TARGETS: Dict[str, Integration] = { @@ -97,6 +98,7 @@ class Integration(BaseModel): target_exe="fuzz.exe", inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, + pool="linux", ), "linux-libfuzzer": Integration( template=TemplateType.libfuzzer, @@ -124,6 +126,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="linux", ), "linux-libfuzzer-with-options": Integration( template=TemplateType.libfuzzer, @@ -137,6 +140,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, fuzzing_target_options=["-runs=10000000"], + pool="linux", ), "linux-libfuzzer-dlopen": Integration( template=TemplateType.libfuzzer, @@ -150,6 +154,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-linked-library": Integration( template=TemplateType.libfuzzer, @@ -163,6 +168,7 @@ class Integration(BaseModel): }, reboot_after_setup=True, use_setup=True, + pool="linux", ), "linux-libfuzzer-dotnet": Integration( template=TemplateType.libfuzzer_dotnet, @@ -180,6 +186,7 @@ class Integration(BaseModel): ContainerType.unique_reports: 1, }, test_repro=False, + pool="linux", ), "linux-libfuzzer-aarch64-crosscompile": Integration( template=TemplateType.libfuzzer_qemu_user, @@ -189,6 +196,7 @@ class Integration(BaseModel): use_setup=True, wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1}, test_repro=False, + pool="linux", ), "linux-libfuzzer-rust": Integration( template=TemplateType.libfuzzer, @@ -196,6 +204,7 @@ class Integration(BaseModel): target_exe="fuzz_target_1", wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1}, fuzzing_target_options=["--test:{extra_setup_dir}"], + pool="linux", ), "linux-trivial-crash": Integration( template=TemplateType.radamsa, @@ -204,6 +213,7 @@ class Integration(BaseModel): inputs="seeds", wait_for_files={ContainerType.unique_reports: 1}, inject_fake_regression=True, + pool="linux", ), "linux-trivial-crash-asan": Integration( template=TemplateType.radamsa, @@ -256,6 +266,7 @@ class Integration(BaseModel): "--only_asan_failures", "--write_test_file={extra_output_dir}/test.txt", ], + pool="windows", ), "windows-libfuzzer-linked-library": Integration( 
        template=TemplateType.libfuzzer,
@@ -268,6 +279,7 @@ class Integration(BaseModel):
             ContainerType.coverage: 1,
         },
         use_setup=True,
+        pool="windows",
     ),
     "windows-libfuzzer-load-library": Integration(
         template=TemplateType.libfuzzer,
@@ -280,6 +292,7 @@ class Integration(BaseModel):
             ContainerType.coverage: 1,
         },
         use_setup=True,
+        pool="windows",
     ),
     "windows-libfuzzer-dotnet": Integration(
         template=TemplateType.libfuzzer_dotnet,
@@ -297,6 +310,7 @@ class Integration(BaseModel):
             ContainerType.unique_reports: 1,
         },
         test_repro=False,
+        pool="windows",
     ),
     "windows-trivial-crash": Integration(
         template=TemplateType.radamsa,
@@ -305,6 +319,7 @@ class Integration(BaseModel):
         inputs="seeds",
         wait_for_files={ContainerType.unique_reports: 1},
         inject_fake_regression=True,
+        pool="windows",
     ),
 }
@@ -373,7 +388,7 @@ def try_info_get(data: Any) -> None:
         self.inject_log(self.start_log_marker)
         for entry in os_list:
-            name = PoolName(f"testpool-{entry.name}-{self.test_id}")
+            name = self.build_pool_name(entry.name)
             self.logger.info("creating pool: %s:%s", entry.name, name)
             self.of.pools.create(name, entry)
             self.logger.info("creating scaleset for pool: %s", name)
@@ -594,12 +609,9 @@ def launch(
     ) -> List[UUID]:
         """Launch all of the fuzzing templates"""
-        pools: Dict[OS, Pool] = {}
+        pool = None
         if unmanaged_pool is not None:
-            pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name)
-        else:
-            for pool in self.of.pools.list():
-                pools[pool.os] = pool
+            pool = unmanaged_pool.pool_name

         job_ids = []
@@ -610,8 +622,8 @@ def launch(
             if config.os not in os_list:
                 continue
-            if config.os not in pools.keys():
-                raise Exception(f"No pool for target: {target} ,os: {config.os}")
+            if pool is None:
+                pool = self.build_pool_name(config.pool)

             self.logger.info("launching: %s", target)
@@ -636,11 +648,15 @@ def launch(
             job: Optional[Job] = None
+
             job = self.build_job(
-                duration, pools, target, config, setup, target_exe, inputs
+                duration, pool, target, config, setup, target_exe, inputs
             )

             if config.inject_fake_regression and job is not None:
@@ -656,7 +672,7 @@ def launch(
     def build_job(
         self,
         duration: int,
-        pools: Dict[OS, Pool],
+        pool: PoolName,
         target: str,
         config: Integration,
         setup: Optional[Directory],
@@ -672,7 +688,7 @@ def build_job(
                 self.project,
                 target,
                 BUILD,
-                pools[config.os].name,
+                pool,
                 target_exe=target_exe,
                 inputs=inputs,
                 setup_dir=setup,
@@ -697,7 +713,7 @@ def build_job(
                 self.project,
                 target,
                 BUILD,
-                pools[config.os].name,
+                pool,
                 target_dll=File(config.target_exe),
                 inputs=inputs,
                 setup_dir=setup,
@@ -713,7 +729,7 @@ def build_job(
                 self.project,
                 target,
                 BUILD,
-                pools[config.os].name,
+                pool,
                 inputs=inputs,
                 target_exe=target_exe,
                 duration=duration,
@@ -726,7 +742,7 @@ def build_job(
                 self.project,
                 target,
                 BUILD,
-                pool_name=pools[config.os].name,
+                pool_name=pool,
                 target_exe=target_exe,
                 inputs=inputs,
                 setup_dir=setup,
@@ -741,7 +757,7 @@ def build_job(
                 self.project,
                 target,
                 BUILD,
-                pool_name=pools[config.os].name,
+                pool_name=pool,
                 target_exe=target_exe,
                 inputs=inputs,
                 setup_dir=setup,
@@ -1271,6 +1287,9 @@ def check_logs_for_errors(self) -> None:
         if seen_errors:
             raise Exception("logs included errors")
+
+    def build_pool_name(self, os_type: str) -> PoolName:
+        return PoolName(f"testpool-{os_type}-{self.test_id}")

     def check_jobs(
         self,
diff --git a/src/runtime-tools/linux/setup.sh b/src/runtime-tools/linux/setup.sh
old mode
100755 new mode 100644 index f6859003b4..794e827f4d --- a/src/runtime-tools/linux/setup.sh +++ b/src/runtime-tools/linux/setup.sh @@ -18,6 +18,14 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT" export ONEFUZZ_ROOT=/onefuzz export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer +# `logger` won't work on mariner unless we install this package first +if type yum > /dev/null 2> /dev/null; then + until yum install -y util-linux sudo; do + echo "yum failed. sleep 10s, then retrying" + sleep 10 + done +fi + logger "onefuzz: making directories" sudo mkdir -p /onefuzz/downloaded sudo chown -R $(whoami) /onefuzz @@ -134,31 +142,53 @@ if type apt > /dev/null 2> /dev/null; then sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH fi - # Install dotnet + # Needed to install dotnet until sudo apt install -y curl libicu-dev; do logger "apt failed, sleeping 10s then retrying" sleep 10 done +elif type yum > /dev/null 2> /dev/null; then + until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do + echo "yum failed. sleep 10s, then retrying" + sleep 10 + done + + # Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi + yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo' + yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install' - logger "downloading dotnet install" - curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' - chmod +x dotnet-install.sh - for version in "${DOTNET_VERSIONS[@]}"; do - logger "running dotnet install $version" - /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' - done - rm dotnet-install.sh - - logger "install dotnet tools" - pushd "$DOTNET_ROOT" - ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - "$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' - popd + if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then + until yum install -y llvm-12.0.1; do + echo "yum failed, sleeping 10s then retrying" + sleep 10 + done + + # If specifying symbolizer, exe name must be a "known symbolizer". + # Using `llvm-symbolizer` works for clang 8 .. 12. 
+ sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH + fi fi +# Install dotnet +logger "downloading dotnet install" +curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install' +chmod +x dotnet-install.sh + +for version in "${DOTNET_VERSIONS[@]}"; do + logger "running dotnet install $version" + /bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup' +done +rm dotnet-install.sh + +logger "install dotnet tools" +pushd "$DOTNET_ROOT" +ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools' +popd + if [ -v DOCKER_BUILD ]; then echo "building for docker" elif [ -d /etc/systemd/system ]; then
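Illustrative sketch only, not part of the patch above: it shows the agent-side reporting pattern this change introduces, where a task holds the optional client produced by CommonConfig::init_job_result() and emits counters with send_direct. The helper name report_new_report is hypothetical; JobResultData, JobResultSender, TaskJobResultClient and the send_direct call mirror the usage added in crash_report.rs and syncdir.rs.

    // Sketch: best-effort reporting of a single new crash report through the job-result queue.
    use std::collections::HashMap;

    use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};

    // `jr_client` is None when no job_result_queue was configured for the task;
    // in that case the report is simply skipped, matching the patterns in the diff.
    async fn report_new_report(jr_client: &Option<TaskJobResultClient>) {
        if let Some(jr_client) = jr_client {
            let _ = jr_client
                .send_direct(
                    JobResultData::NewReport,
                    HashMap::from([("count".to_string(), 1.0)]),
                )
                .await;
        }
    }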