# Used to quickly run benchmark under RSpec as part of the usual test suite, to validate it didn't bitrot
VALIDATE_BENCHMARK_MODE = ENV['VALIDATE_BENCHMARK'] == 'true'

return unless __FILE__ == $PROGRAM_NAME || VALIDATE_BENCHMARK_MODE

require_relative 'benchmarks_helper'

class TracingTraceBenchmark
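  # Replaces Datadog::Tracing::Writer#write with a no-op, so finished traces are
  # discarded instead of being flushed, isolating the in-process tracing cost.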
  module NoopWriter
    def write(trace)
      # no-op
    end
  end

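  # Replaces the HTTP transport adapter's #open with a stub that returns a canned
  # 200 response, so no request ever leaves the process.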
  module NoopAdapter
    Response = Struct.new(:code, :body)

    def open
      Response.new(200)
    end
  end

  # @param [Integer] time in seconds. The default is 12 seconds because having over 105 samples allows the
  # benchmarking platform to calculate helpful aggregate stats. Because benchmark-ips tries to run one iteration
  # per 100ms, this means we'll have around 120 samples (give or take a small margin of error).
  # @param [Integer] warmup in seconds. The default is 2 seconds.
  def benchmark_time(time: 12, warmup: 2)
    VALIDATE_BENCHMARK_MODE ? { time: 0.001, warmup: 0 } : { time: time, warmup: warmup }
  end

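  # Measures creating and finishing traces of various depths with the writer stubbed
  # out by NoopWriter, so no traces are serialized or flushed.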
  def benchmark_no_writer
    ::Datadog::Tracing::Writer.prepend(NoopWriter)

    Benchmark.ips do |x|
      x.config(**benchmark_time)

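      # benchmark-ips accepts a string action, so this builds a string of `depth`
      # nested Datadog::Tracing.trace('op.name') blocks for it to evaluate.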
      def trace(x, depth)
        x.report(
          "#{depth} span trace - no writer",
          (depth.times.map { "Datadog::Tracing.trace('op.name') {" } + depth.times.map { "}" }).join
        )
      end

      trace(x, 1)
      trace(x, 10)
      trace(x, 100)

      x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
      x.compare!
    end
  end

  # Because the writer runs in the background, on a timed interval, benchmark results will have
  # dips (lower ops/sec) whenever the writer wakes up and consumes all pending traces.
  # This is OK for our measurements, because we want to measure the full performance cost,
  # but it creates high variability, depending on the sampled interval.
  # This means that this benchmark will be marked as internally "unstable",
  # but we trust its total average result.
  def benchmark_no_network
    ::Datadog::Core::Transport::HTTP::Adapters::Net.prepend(NoopAdapter)

    Benchmark.ips do |x|
      x.config(**benchmark_time)

      def trace(x, depth)
        x.report(
          "#{depth} span trace - no network",
          (depth.times.map { "Datadog::Tracing.trace('op.name') {" } + depth.times.map { "}" }).join
        )
      end

      trace(x, 1)
      trace(x, 10)
      trace(x, 100)

      x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
      x.compare!
    end
  end

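  # Measures creating a digest from the active trace (the snapshot used below for
  # propagation and for continuing traces).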
  def benchmark_to_digest
    Datadog::Tracing.trace('op.name') do |span, trace|
      Benchmark.ips do |x|
        x.config(**benchmark_time)

        x.report("trace.to_digest") do
          trace.to_digest
        end

        x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
        x.compare!
      end
    end
  end

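  # Measures generating the log correlation string for the active trace.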
  def benchmark_log_correlation
    Datadog::Tracing.trace('op.name') do |span, trace|
      Benchmark.ips do |x|
        x.config(**benchmark_time)

        x.report("Tracing.log_correlation") do
          Datadog::Tracing.log_correlation
        end

        x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
        x.compare!
      end
    end
  end

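  # Measures creating a digest and continuing a new trace from it, as happens when
  # resuming a distributed trace.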
  def benchmark_to_digest_continue
    Datadog::Tracing.trace('op.name') do |span, trace|
      Benchmark.ips do |x|
        x.config(**benchmark_time)

        x.report("trace.to_digest - Continue") do
          digest = trace.to_digest
          Datadog::Tracing.continue_trace!(digest)
        end

        x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
        x.compare!
      end
    end
  end

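  # Measures a full inject + extract round trip of distributed tracing headers using
  # the Datadog propagation style.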
  def benchmark_propagation_datadog
    Datadog.configure do |c|
      if defined?(c.tracing.distributed_tracing.propagation_extract_style)
        # Required to run benchmarks against ddtrace 1.x.
        # Can be removed when 2.0 is merged to master.
        c.tracing.distributed_tracing.propagation_style = ['datadog']
      else
        c.tracing.propagation_style = ['datadog']
      end
    end

    Datadog::Tracing.trace('op.name') do |span, trace|
      injected_trace_digest = trace.to_digest

      Benchmark.ips do |x|
        x.config(**benchmark_time)

        x.report("Propagation - Datadog") do
          env = {}
          Datadog::Tracing::Contrib::HTTP.inject(injected_trace_digest, env)
          extracted_trace_digest = Datadog::Tracing::Contrib::HTTP.extract(env)
          raise unless extracted_trace_digest
        end

        x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
        x.compare!
      end
    end
  end

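  # Measures the same inject + extract round trip using the W3C Trace Context
  # propagation style.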
  def benchmark_propagation_trace_context
    Datadog.configure do |c|
      c.tracing.propagation_style = ['tracecontext']
    end

    Datadog::Tracing.trace('op.name') do |span, trace|
      injected_trace_digest = trace.to_digest

      Benchmark.ips do |x|
        x.config(**benchmark_time)

        x.report("Propagation - Trace Context") do
          env = {}
          Datadog::Tracing::Contrib::HTTP.inject(injected_trace_digest, env)
          extracted_trace_digest = Datadog::Tracing::Contrib::HTTP.extract(env)
          raise unless extracted_trace_digest
        end

        x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
        x.compare!
      end
    end
  end
end

puts "Current pid is #{Process.pid}"
def run_benchmark(&block)
# Forking to avoid monkey-patching leaking between benchmarks
pid = fork { block.call }
_, status = Process.wait2(pid)
raise "Benchmark failed with status #{status}" unless status.success?
end
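# Run each benchmark in its own forked process (see run_benchmark above).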
TracingTraceBenchmark.new.instance_exec do
  run_benchmark { benchmark_no_writer }
  run_benchmark { benchmark_no_network }
  run_benchmark { benchmark_to_digest }
  run_benchmark { benchmark_log_correlation }
  run_benchmark { benchmark_to_digest_continue }
  run_benchmark { benchmark_propagation_datadog }
  run_benchmark { benchmark_propagation_trace_context }
end