# ...

describe 'Memory Leak Prevention' do
  let(:datafile) { '{"version": "4", "experiments": [], "groups": [], "events": [], "featureFlags": []}' }

  before do
    # Clean up any existing instances
    Optimizely::Project.clear_instance_cache!
  # ...
  describe 'Thread Creation Prevention' do
    it 'should not create new threads when using get_or_create_instance repeatedly' do
      initial_thread_count = Thread.list.size

      # Simulate the problematic pattern that was causing memory leaks;
      # in the real world this would be called once per request
      threads_created = []

      10.times do |i|
        # Use the safe caching method
        optimizely = Optimizely::Project.get_or_create_instance(datafile: datafile)

        # Make a decision to trigger thread creation, if any
        optimizely.create_user_context("user_#{i}")

        # Track the thread count after each creation
        threads_created << Thread.list.size
      end

      final_thread_count = Thread.list.size

      # Only one cached instance should have been created
      expect(Optimizely::Project.cached_instance_count).to eq(1)

      # Thread count should not have grown significantly per instance;
      # allow for some variance due to initialization of the first instance
      expect(final_thread_count).to be <= initial_thread_count + 5

      # Verify that we're not creating more threads with each call:
      # after the first few calls, the thread count should stabilize
      stable_count = threads_created[3]
      # ...

    it 'demonstrates the memory leak that would occur with repeated Project.new calls' do
      instances = []

      # Simulate the problematic pattern that caused the memory leak:
      # creating a fresh Project on every call, without caching and
      # without cleaning the old instances up (the leak condition)
      5.times do
        instances << Optimizely::Project.new(datafile: datafile)
      end

      # Each instance creates its own background threads; in the real
      # memory leak scenario these would accumulate indefinitely
      expect(instances.size).to eq(5)
      expect(instances.uniq.size).to eq(5) # all distinct instances

      # Clean up the instances to prevent an actual memory leak in the test
      instances.each(&:close)
    end
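
    # A minimal sketch, for reference, of the two per-request patterns the
    # examples above contrast. These helpers are illustrative only and are
    # not used by the specs; `current_datafile` is a hypothetical method
    # returning a cached datafile string.
    def handle_request_leaky(user_id)
      # Anti-pattern: a new Project (and its background threads) on every
      # request, never closed, so threads accumulate indefinitely
      optimizely = Optimizely::Project.new(datafile: current_datafile)
      optimizely.create_user_context(user_id)
    end

    def handle_request_safe(user_id)
      # Safe pattern: identical configurations reuse one cached instance
      optimizely = Optimizely::Project.get_or_create_instance(datafile: current_datafile)
      optimizely.create_user_context(user_id)
    end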
  # ...

    it 'should create same cache key for identical configurations' do
      instance1 = Optimizely::Project.get_or_create_instance(datafile: datafile)
      instance2 = Optimizely::Project.get_or_create_instance(datafile: datafile)

      expect(instance1).to be(instance2)
      expect(Optimizely::Project.cached_instance_count).to eq(1)
    end
    # ...
        datafile: datafile,
        skip_json_validation: false
      )

      expect(instance1).not_to be(instance2)
      expect(Optimizely::Project.cached_instance_count).to eq(2)
    end
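
    # The cache-key derivation is not shown in this excerpt. A plausible
    # sketch, purely as an assumption consistent with the expectations
    # above, of how a key could be built from the constructor arguments:
    #
    #   require 'digest'
    #
    #   def hypothetical_cache_key(datafile:, skip_json_validation: true)
    #     Digest::SHA256.hexdigest([datafile, skip_json_validation].inspect)
    #   end
    #
    # Identical argument lists hash to the same key, so they share one
    # cached instance; any differing option (such as skip_json_validation)
    # produces a distinct key and therefore a second instance.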
  # ...

  describe 'Resource Cleanup' do
    it 'should properly stop background threads when instance is closed' do
      instance = Optimizely::Project.get_or_create_instance(datafile: datafile)

      # Trigger thread creation by making a decision
      instance.create_user_context('test_user')

      expect(instance.stopped).to be_falsy

      instance.close

      expect(instance.stopped).to be_truthy
      expect(Optimizely::Project.cached_instance_count).to eq(0)
    end
    # ...
      instance2 = Optimizely::Project.get_or_create_instance(
        datafile: '{"version": "4", "experiments": [{"id": "test"}], "groups": [], "events": [], "featureFlags": []}'
      )

      expect(Optimizely::Project.cached_instance_count).to eq(2)
      expect(instance1.stopped).to be_falsy
      expect(instance2.stopped).to be_falsy

      Optimizely::Project.clear_instance_cache!

      expect(Optimizely::Project.cached_instance_count).to eq(0)
      expect(instance1.stopped).to be_truthy
      expect(instance2.stopped).to be_truthy
    # ...
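  # In application code, one way to guarantee that cached instances and
  # their background threads are released at shutdown is a process-exit
  # hook. The hook itself is an illustrative suggestion; as the
  # expectations above show, clear_instance_cache! closes every cached
  # instance, so no per-instance close is needed on this path:
  #
  #   at_exit { Optimizely::Project.clear_instance_cache! }
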
  describe 'Production Usage Patterns' do
    it 'should handle Rails-like request pattern efficiently' do
      initial_thread_count = Thread.list.size

      # Simulate a Rails controller pattern with a cached datafile
      cached_datafile = datafile
      request_results = []

      # Simulate 50 requests (a volume that caused significant memory growth before the fix)
      50.times do |request_id|
        # This is the safe pattern that should be used in production
        optimizely = Optimizely::Project.get_or_create_instance(datafile: cached_datafile)

        # Simulate making decisions in the request
        optimizely.create_user_context("user_#{request_id}")

        # Store the result (in a real app this would be returned to the user)
        request_results << {
          request_id: request_id,
          optimizely_instance_id: optimizely.object_id,
          thread_count: Thread.list.size
        }
      end

      # Verify efficiency:
      # 1. All requests should use the same instance
      unique_instance_ids = request_results.map { |r| r[:optimizely_instance_id] }.uniq
      expect(unique_instance_ids.size).to eq(1)

      # 2. Only one instance should be cached
      expect(Optimizely::Project.cached_instance_count).to eq(1)

      # 3. Thread count should be stable after the initial ramp-up
      final_thread_counts = request_results.last(10).map { |r| r[:thread_count] }
      expect(final_thread_counts.uniq.size).to be <= 2 # allow for minimal variance

      # 4. No significant thread growth overall
      final_thread_count = Thread.list.size
      expect(final_thread_count).to be <= initial_thread_count + 10
    end
  end
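
  # A sketch of the Rails usage this pattern supports. The controller,
  # cache call, and DATAFILE_URL constant are hypothetical scaffolding;
  # get_or_create_instance and create_user_context are the calls exercised
  # above, and decide is the SDK's user-context decision API.
  #
  #   class DecisionsController < ApplicationController
  #     def show
  #       # Cache the datafile string itself; identical datafile strings
  #       # then map onto one shared SDK instance across requests
  #       datafile = Rails.cache.fetch('optimizely_datafile', expires_in: 5.minutes) do
  #         Net::HTTP.get(URI(DATAFILE_URL))
  #       end
  #
  #       optimizely = Optimizely::Project.get_or_create_instance(datafile: datafile)
  #       user_context = optimizely.create_user_context(current_user.id.to_s)
  #       decision = user_context.decide('checkout_flow') # flag key is illustrative
  #       render json: { enabled: decision.enabled }
  #     end
  #   end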

  describe 'Memory Safety Guarantees' do
    it 'should not cache instances with dynamic configuration' do
      # These should not be cached because they carry dynamic configuration
      instance_with_sdk_key = Optimizely::Project.get_or_create_instance(
        datafile: datafile,
        sdk_key: 'test_key'
      )

      instance_with_user_profile = Optimizely::Project.get_or_create_instance(
        datafile: datafile,
        user_profile_service: double('user_profile_service')
      )

      # Zero cached instances, since neither of these should be cached
      expect(Optimizely::Project.cached_instance_count).to eq(0)

      # Clean up the non-cached instances
      instance_with_sdk_key.close
      instance_with_user_profile.close
    # ...

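    # Instances created with dynamic configuration bypass the cache, so the
    # caller owns their lifecycle. One way to keep that safe, sketched with
    # a hypothetical block helper (not SDK API):
    #
    #   def with_optimizely(datafile:, sdk_key:)
    #     instance = Optimizely::Project.get_or_create_instance(
    #       datafile: datafile,
    #       sdk_key: sdk_key
    #     )
    #     yield instance
    #   ensure
    #     instance&.close # never cached, so it must be closed explicitly
    #   end
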
    it 'should handle finalizer cleanup gracefully' do
      # Test that finalizers work when instances are not explicitly closed
      Optimizely::Project.get_or_create_instance(datafile: datafile)

      expect(Optimizely::Project.cached_instance_count).to eq(1)

      # Force garbage collection to trigger the finalizer
      GC.start

      # The finalizer should have been called, but the instance might still
      # be in the cache until explicitly removed. This tests that the
      # finalizer doesn't crash the system.
      # ...
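
  # The finalizer mechanism itself is not shown in this excerpt. A common
  # shape for this kind of cleanup in Ruby, sketched here as an assumption
  # about the implementation rather than a statement of it:
  #
  #   ObjectSpace.define_finalizer(instance, self.class.cleanup_proc(resources))
  #
  #   def self.cleanup_proc(resources)
  #     # The proc must not capture the instance itself, or the instance
  #     # can never be collected; close over only the resources to release
  #     proc { resources.each(&:stop!) }
  #   end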