@@ -116,6 +116,7 @@ func runAgent(cmd *cobra.Command, args []string) error {
116116 }()
117117
118118 // Main scraping loop
119+ // Use ticker for interval, but align collection timestamps to interval boundaries
119120 ticker := time.NewTicker(cfg.Agent.Interval)
120121 defer ticker.Stop()
121122
@@ -125,8 +126,9 @@ func runAgent(cmd *cobra.Command, args []string) error {
125126 logger.String("prometheus_endpoint", cfg.Prometheus.Endpoint),
126127 logger.String("server_endpoint", cfg.Server.Endpoint))
127128
128- // Scrape immediately on start
129- if err := scrapeAndSend(scraper, sender, cfg.Agent.ServerID); err != nil {
129+ // Scrape immediately on start with aligned timestamp
130+ collectionTime := time.Now().Truncate(cfg.Agent.Interval)
131+ if err := scrapeAndSendWithTimestamp(scraper, sender, cfg.Agent.ServerID, collectionTime); err != nil {
130132 logger.Error("Initial scrape failed", logger.Err(err))
131133 }
132134
@@ -135,30 +137,44 @@ func runAgent(cmd *cobra.Command, args []string) error {
135137 select {
136138 case <- ctx .Done ():
137139 return nil
138- case <-ticker.C:
139- if err := scrapeAndSend(scraper, sender, cfg.Agent.ServerID); err != nil {
140+ case tickTime := <-ticker.C:
141+ // Align collection time to interval boundary
142+ collectionTime := tickTime.Truncate(cfg.Agent.Interval)
143+ if err := scrapeAndSendWithTimestamp(scraper, sender, cfg.Agent.ServerID, collectionTime); err != nil {
140144 logger.Error("Scrape failed", logger.Err(err))
141145 }
142146 }
143147 }
144148}
145149
146- func scrapeAndSend(scraper *prometheus.Scraper, sender *report.Sender, serverID string) error {
150+ // scrapeAndSendWithTimestamp scrapes metrics and adds aligned collection timestamp
151+ func scrapeAndSendWithTimestamp(scraper *prometheus.Scraper, sender *report.Sender, serverID string, collectionTime time.Time) error {
147152 // Scrape Prometheus exporter
148153 data, err := scraper.Scrape()
149154 if err != nil {
150155 return fmt.Errorf("failed to scrape prometheus: %w", err)
151156 }
152157
158+ // Add explicit timestamps to metrics (aligned to collection time)
159+ // This ensures all agents report metrics at the same logical time boundaries
160+ dataWithTimestamp := prometheus.AddTimestamps(data, collectionTime)
161+
153162 // Save to buffer (WAL pattern - actual sending happens in background)
154- if err := sender.SendPrometheus(data, serverID); err != nil {
163+ if err := sender.SendPrometheus(dataWithTimestamp, serverID); err != nil {
155164 return fmt.Errorf("failed to buffer prometheus data: %w", err)
156165 }
157166
158- logger.Debug("Prometheus data scraped and buffered", logger.Int("bytes", len(data)))
167+ logger.Debug("Prometheus data scraped and buffered",
168+ logger.Int("bytes", len(dataWithTimestamp)),
169+ logger.Time("collection_time", collectionTime))
159170 return nil
160171}
161172
173+ // Legacy function kept for backwards compatibility
174+ func scrapeAndSend(scraper *prometheus.Scraper, sender *report.Sender, serverID string) error {
175+ return scrapeAndSendWithTimestamp(scraper, sender, serverID, time.Now().Truncate(5*time.Second))
176+ }
177+
162178func runInBackground () error {
163179 // Check config exists
164180 if err := config.RequireConfig(cfgFile); err != nil {
0 commit comments