@@ -49,6 +49,48 @@ struct vc4_dev {
49
49
50
50
/* Protects bo_cache and the BO stats. */
51
51
struct mutex bo_lock ;
52
+
53
+ /* Sequence number for the last job queued in job_list.
54
+ * Starts at 0 (no jobs emitted).
55
+ */
56
+ uint64_t emit_seqno ;
57
+
58
+ /* Sequence number for the last completed job on the GPU.
59
+ * Starts at 0 (no jobs completed).
60
+ */
61
+ uint64_t finished_seqno ;
62
+
63
+ /* List of all struct vc4_exec_info for jobs to be executed.
64
+ * The first job in the list is the one currently programmed
65
+ * into ct0ca/ct1ca for execution.
66
+ */
67
+ struct list_head job_list ;
68
+ /* List of the finished vc4_exec_infos waiting to be freed by
69
+ * job_done_work.
70
+ */
71
+ struct list_head job_done_list ;
72
+ /* Spinlock used to synchronize the job_list and seqno
73
+ * accesses between the IRQ handler and GEM ioctls.
74
+ */
75
+ spinlock_t job_lock ;
76
+ wait_queue_head_t job_wait_queue ;
77
+ struct work_struct job_done_work ;
78
+
79
+ /* The binner overflow memory that's currently set up in
80
+ * BPOA/BPOS registers. When overflow occurs and a new one is
81
+ * allocated, the previous one will be moved to
82
+ * vc4->current_exec's free list.
83
+ */
84
+ struct vc4_bo * overflow_mem ;
85
+ struct work_struct overflow_mem_work ;
86
+
87
+ struct {
88
+ uint32_t last_ct0ca , last_ct1ca ;
89
+ struct timer_list timer ;
90
+ struct work_struct reset_work ;
91
+ } hangcheck ;
92
+
93
+ struct semaphore async_modeset ;
52
94
};
53
95
54
96
static inline struct vc4_dev *
@@ -60,6 +102,9 @@ to_vc4_dev(struct drm_device *dev)
60
102
struct vc4_bo {
61
103
struct drm_gem_cma_object base ;
62
104
105
+ /* seqno of the last job to render to this BO. */
106
+ uint64_t seqno ;
107
+
63
108
/* List entry for the BO's position in either
64
109
* vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
65
110
*/
@@ -130,6 +175,101 @@ to_vc4_encoder(struct drm_encoder *encoder)
130
175
#define HVS_READ (offset ) readl(vc4->hvs->regs + offset)
131
176
#define HVS_WRITE (offset , val ) writel(val, vc4->hvs->regs + offset)
132
177
178
/* Per-job state for one bin/render job submitted through the
 * submit-CL ioctl.  Holds the kernel copy of the user's arguments,
 * the BOs looked up for the job, and the storage for the validated
 * command lists, shader records, and uniforms.
 */
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		/* Address of the shader record within exec_bo
		 * (NOTE(review): assumed from name — confirm in
		 * vc4_validate.c).
		 */
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	/* Flags recording which mandatory bin-CL packets the validator
	 * has encountered (presumably checked for completeness by the
	 * validator in vc4_validate.c — not visible in this header).
	 */
	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	/* Tile extents of the binning configuration. */
	uint8_t bin_tiles_x, bin_tiles_y;
	/* BO holding the tile allocation memory, and the offset into it
	 * where that allocation starts.
	 */
	struct drm_gem_cma_object *tile_bo;
	uint32_t tile_alloc_offset;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. These paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};
264
+
265
+ static inline struct vc4_exec_info *
266
+ vc4_first_job (struct vc4_dev * vc4 )
267
+ {
268
+ if (list_empty (& vc4 -> job_list ))
269
+ return NULL ;
270
+ return list_first_entry (& vc4 -> job_list , struct vc4_exec_info , head );
271
+ }
272
+
133
273
/**
134
274
* struct vc4_texture_sample_info - saves the offsets into the UBO for texture
135
275
* setup parameters.
@@ -231,10 +371,31 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
231
371
/* vc4_drv.c */
232
372
void __iomem * vc4_ioremap_regs (struct platform_device * dev , int index );
233
373
374
/* vc4_gem.c */
/* Set up / tear down the GEM job-execution state on the device. */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
/* ioctl handler: submit a bin/render command list for execution. */
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
/* ioctl handler: wait for a given job seqno to complete. */
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
/* ioctl handler: wait for the last job rendering to a BO to complete. */
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
/* Kick off the next job in vc4->job_list on the hardware. */
void vc4_submit_next_job(struct drm_device *dev);
/* Block (up to timeout_ns) until finished_seqno reaches seqno.
 * NOTE(review): return convention presumed 0/-errno — confirm in
 * vc4_gem.c.
 */
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
/* Move finished jobs from job_list to job_done_list for freeing. */
void vc4_job_handle_completed(struct vc4_dev *vc4);

234
388
/* vc4_hdmi.c */
235
389
extern struct platform_driver vc4_hdmi_driver ;
236
390
int vc4_hdmi_debugfs_regs (struct seq_file * m , void * unused );
237
391
392
/* vc4_irq.c */
/* V3D interrupt handler; completes jobs and services binner
 * overflow (NOTE(review): exact duties live in vc4_irq.c — confirm).
 */
irqreturn_t vc4_irq(int irq, void *arg);
/* Standard DRM irq install/uninstall hooks. */
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
/* Re-arm interrupt state after a GPU reset. */
void vc4_irq_reset(struct drm_device *dev);

238
399
/* vc4_hvs.c */
239
400
extern struct platform_driver vc4_hvs_driver ;
240
401
void vc4_hvs_dump_state (struct drm_device * dev );
@@ -253,6 +414,27 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state);
253
414
extern struct platform_driver vc4_v3d_driver ;
254
415
int vc4_v3d_debugfs_ident (struct seq_file * m , void * unused );
255
416
int vc4_v3d_debugfs_regs (struct seq_file * m , void * unused );
417
/* Power the V3D block on or off. */
int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);

/* vc4_validate.c */
/* Validate the user's binner command list, writing the relocated
 * copy into @validated.
 */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

/* Validate the shader records referenced by the bin CL. */
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

/* Look up a BO by its index (hindex) in exec->bo for use during
 * validation.
 */
struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

/* Build/validate the render command list for the job. */
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

/* Check that a texture/framebuffer access of the given geometry fits
 * within @fbo. Returns true when the access is in bounds
 * (NOTE(review): return sense presumed from bool type — confirm in
 * vc4_validate.c).
 */
bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);
256
438
257
439
/* vc4_validate_shader.c */
258
440
struct vc4_validated_shader_info *
0 commit comments