@@ -15,6 +15,7 @@
 /*
  * Power Management:
  */
+static unsigned long get_freq(struct msm_gpu *gpu);
 
 static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 		u32 flags)
@@ -23,25 +24,32 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 	struct msm_gpu_devfreq *df = &gpu->devfreq;
 	struct dev_pm_opp *opp;
 
+	/* opp-suspend wart: we should never get a target() call while suspended */
+	if (df->suspended)
+		dev_err(dev, "%s while suspended??\n", __func__);
+
+	unsigned long curr_freq = get_freq(gpu);
+	if (*freq == curr_freq)
+		return 0;
+
 	/*
 	 * Note that devfreq_recommended_opp() can modify the freq
 	 * to something that actually is in the opp table:
 	 */
+	unsigned long requested_freq = *freq;
 	opp = devfreq_recommended_opp(dev, freq, flags);
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
-	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
 
-	/*
-	 * If the GPU is idle, devfreq is not aware, so just stash
-	 * the new target freq (to use when we return to active)
-	 */
-	if (df->idle_freq) {
-		df->idle_freq = *freq;
-		dev_pm_opp_put(opp);
-		return 0;
-	}
+	if (*freq == curr_freq) {
+		dev_pm_opp_put(opp);
+		return 0;
+	}
+
+	dev_dbg(dev, "%s %lu => %lu (requested %lu)\n", __func__, curr_freq, *freq, requested_freq);
+
+	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
 
 	if (gpu->funcs->gpu_set_freq) {
 		mutex_lock(&df->lock);
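Side note on the early returns above: devfreq_recommended_opp() hands back an OPP with a reference held, so every exit path after a successful lookup must drop it with dev_pm_opp_put(); that is why the second *freq == curr_freq bail-out needs the put shown here (the draft as posted leaked it). A minimal sketch of the contract, as generic driver code rather than anything msm-specific:

	#include <linux/devfreq.h>
	#include <linux/pm_opp.h>

	static int example_target(struct device *dev, unsigned long *freq, u32 flags)
	{
		struct dev_pm_opp *opp;

		/* Takes a reference on the returned OPP and may round *freq */
		opp = devfreq_recommended_opp(dev, freq, flags);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		/* ... use dev_pm_opp_get_freq(opp), program the hardware ... */

		/* Every path past this point must drop the reference: */
		dev_pm_opp_put(opp);
		return 0;
	}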
@@ -58,16 +66,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 
 static unsigned long get_freq(struct msm_gpu *gpu)
 {
-	struct msm_gpu_devfreq *df = &gpu->devfreq;
-
-	/*
-	 * If the GPU is idle, use the shadow/saved freq to avoid
-	 * confusing devfreq (which is unaware that we are switching
-	 * to lowest freq until the device is active again)
-	 */
-	if (df->idle_freq)
-		return df->idle_freq;
-
 	if (gpu->funcs->gpu_get_freq)
 		return gpu->funcs->gpu_get_freq(gpu);
 
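For readers without the full file: with the idle_freq shadowing gone, get_freq() collapses to the direct read path. The result should look roughly like this; the clk_get_rate() fallback is the pre-existing tail of the function, which this hunk does not show, so treat that line as an assumption:

	static unsigned long get_freq(struct msm_gpu *gpu)
	{
		if (gpu->funcs->gpu_get_freq)
			return gpu->funcs->gpu_get_freq(gpu);

		/* assumed fallback: read the core clock directly */
		return clk_get_rate(gpu->core_clk);
	}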
@@ -136,9 +134,6 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
 	.get_cur_freq = msm_devfreq_get_cur_freq,
 };
 
-static void msm_devfreq_boost_work(struct kthread_work *work);
-static void msm_devfreq_idle_work(struct kthread_work *work);
-
 static bool has_devfreq(struct msm_gpu *gpu)
 {
 	struct msm_gpu_devfreq *df = &gpu->devfreq;
@@ -165,9 +160,6 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 
 	mutex_init(&df->lock);
 
-	dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
-			       DEV_PM_QOS_MIN_FREQUENCY, 0);
-
 	msm_devfreq_profile.initial_freq = gpu->fast_rate;
 
 	/*
@@ -185,7 +177,6 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 
 	if (IS_ERR(df->devfreq)) {
 		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
-		dev_pm_qos_remove_request(&df->boost_freq);
 		df->devfreq = NULL;
 		return;
 	}
@@ -199,22 +190,6 @@ void msm_devfreq_init(struct msm_gpu *gpu)
 		gpu->cooling = NULL;
 	}
 
-	msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
-			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
-			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-}
-
-static void cancel_idle_work(struct msm_gpu_devfreq *df)
-{
-	hrtimer_cancel(&df->idle_work.timer);
-	kthread_cancel_work_sync(&df->idle_work.work);
-}
-
-static void cancel_boost_work(struct msm_gpu_devfreq *df)
-{
-	hrtimer_cancel(&df->boost_work.timer);
-	kthread_cancel_work_sync(&df->boost_work.work);
 }
 
 void msm_devfreq_cleanup(struct msm_gpu *gpu)
@@ -224,8 +199,8 @@ void msm_devfreq_cleanup(struct msm_gpu *gpu)
 	if (!has_devfreq(gpu))
 		return;
 
+	devm_devfreq_remove_device(&gpu->pdev->dev, df->devfreq);
 	devfreq_cooling_unregister(gpu->cooling);
-	dev_pm_qos_remove_request(&df->boost_freq);
 }
 
 void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -239,7 +214,6 @@ void msm_devfreq_resume(struct msm_gpu *gpu)
 	mutex_lock(&df->lock);
 	df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
 	df->time = ktime_get();
-	df->idle_freq = 0;
 	df->suspended = false;
 	mutex_unlock(&df->lock);
 
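With the idle_freq shadowing gone, the suspended flag (set and cleared under df->lock) is what keeps the devfreq callbacks honest about hardware state. A sketch of the intended handshake, which is an assumption drawn from this diff rather than code in it; dev_to_gpu() is the existing helper from msm_gpu.h:

	static int example_get_dev_status(struct device *dev,
					  struct devfreq_dev_status *status)
	{
		struct msm_gpu *gpu = dev_to_gpu(dev);
		struct msm_gpu_devfreq *df = &gpu->devfreq;

		mutex_lock(&df->lock);
		if (df->suspended) {
			/* report nothing rather than reading dead counters */
			mutex_unlock(&df->lock);
			return -EAGAIN;
		}
		/* ... sample busy cycles as msm_devfreq_get_dev_status() does ... */
		mutex_unlock(&df->lock);

		return 0;
	}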
@@ -257,123 +231,6 @@ void msm_devfreq_suspend(struct msm_gpu *gpu)
 	df->suspended = true;
 	mutex_unlock(&df->lock);
 
-	cancel_idle_work(df);
-	cancel_boost_work(df);
-
 	devfreq_suspend_device(df->devfreq);
 }
 
-static void msm_devfreq_boost_work(struct kthread_work *work)
-{
-	struct msm_gpu_devfreq *df = container_of(work,
-			struct msm_gpu_devfreq, boost_work.work);
-
-	dev_pm_qos_update_request(&df->boost_freq, 0);
-}
-
-void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
-{
-	struct msm_gpu_devfreq *df = &gpu->devfreq;
-	uint64_t freq;
-
-	if (!has_devfreq(gpu))
-		return;
-
-	freq = get_freq(gpu);
-	freq *= factor;
-
-	/*
-	 * A nice little trap is that PM QoS operates in terms of KHz,
-	 * while devfreq operates in terms of Hz:
-	 */
-	do_div(freq, HZ_PER_KHZ);
-
-	dev_pm_qos_update_request(&df->boost_freq, freq);
-
-	msm_hrtimer_queue_work(&df->boost_work,
-			       ms_to_ktime(msm_devfreq_profile.polling_ms),
-			       HRTIMER_MODE_REL);
-}
-
-void msm_devfreq_active(struct msm_gpu *gpu)
-{
-	struct msm_gpu_devfreq *df = &gpu->devfreq;
-	unsigned int idle_time;
-	unsigned long target_freq;
-
-	if (!has_devfreq(gpu))
-		return;
-
-	/*
-	 * Cancel any pending transition to idle frequency:
-	 */
-	cancel_idle_work(df);
-
-	/*
-	 * Hold devfreq lock to synchronize with get_dev_status()/
-	 * target() callbacks
-	 */
-	mutex_lock(&df->devfreq->lock);
-
-	target_freq = df->idle_freq;
-
-	idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
-
-	df->idle_freq = 0;
-
-	/*
-	 * We could have become active again before the idle work had a
-	 * chance to run, in which case the df->idle_freq would have
-	 * still been zero. In this case, no need to change freq.
-	 */
-	if (target_freq)
-		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
-
-	mutex_unlock(&df->devfreq->lock);
-
-	/*
-	 * If we've been idle for a significant fraction of a polling
-	 * interval, then we won't meet the threshold of busyness for
-	 * the governor to ramp up the freq.. so give some boost
-	 */
-	if (idle_time > msm_devfreq_profile.polling_ms) {
-		msm_devfreq_boost(gpu, 2);
-	}
-}
-
-
-static void msm_devfreq_idle_work(struct kthread_work *work)
-{
-	struct msm_gpu_devfreq *df = container_of(work,
-			struct msm_gpu_devfreq, idle_work.work);
-	struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
-	unsigned long idle_freq, target_freq = 0;
-
-	/*
-	 * Hold devfreq lock to synchronize with get_dev_status()/
-	 * target() callbacks
-	 */
-	mutex_lock(&df->devfreq->lock);
-
-	idle_freq = get_freq(gpu);
-
-	if (priv->gpu_clamp_to_idle)
-		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
-
-	df->idle_time = ktime_get();
-	df->idle_freq = idle_freq;
-
-	mutex_unlock(&df->devfreq->lock);
-}
-
-void msm_devfreq_idle(struct msm_gpu *gpu)
-{
-	struct msm_gpu_devfreq *df = &gpu->devfreq;
-
-	if (!has_devfreq(gpu))
-		return;
-
-	msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-			       HRTIMER_MODE_REL);
-}