@@ -911,6 +911,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 	if (adreno_is_a618(adreno_gpu))
 		return;
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		hbb_lo = 0;
+
 	if (adreno_is_a640_family(adreno_gpu))
 		amsbc = 1;
 
@@ -1135,7 +1138,12 @@ static int hw_init(struct msm_gpu *gpu)
 	}
 
 	/* Clear GBIF halt in case GX domain was not collapsed */
-	if (a6xx_has_gbif(adreno_gpu)) {
+	if (adreno_is_a619_holi(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+		/* Let's make extra sure that the GPU can access the memory.. */
+		mb();
+	} else if (a6xx_has_gbif(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
 		/* Let's make extra sure that the GPU can access the memory.. */
@@ -1144,6 +1152,9 @@ static int hw_init(struct msm_gpu *gpu)
 
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_enable(gmu);
+
 	/*
 	 * Disable the trusted memory range - we don't actually supported secure
 	 * memory rendering at this point in time and we don't want to block off
@@ -1760,12 +1771,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
 #define GBIF_CLIENT_HALT_MASK		BIT(0)
 #define GBIF_ARB_HALT_MASK		BIT(1)
 #define VBIF_XIN_HALT_CTRL0_MASK	GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK		0xF0
+#define GPR0_GBIF_HALT_REQUEST		0x1E0
 
 void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
 {
 	struct msm_gpu *gpu = &adreno_gpu->base;
 
-	if (!a6xx_has_gbif(adreno_gpu)) {
+	if (adreno_is_a619_holi(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
+		spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+			    (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+	} else if (!a6xx_has_gbif(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
 		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
 			    (VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
@@ -1861,6 +1878,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		goto err_bulk_clk;
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_enable(gmu);
+
 	/* If anything goes south, tear the GPU down piece by piece.. */
 	if (ret) {
 err_bulk_clk:
@@ -1920,6 +1940,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	/* Drain the outstanding traffic on memory buses */
 	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_disable(gmu);
+
 	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
 
 	pm_runtime_put_sync(gmu->gxpd);
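Taken together, the hunks above cover the A619_holi variant, which (as the changes imply) cannot rely on the GMU for these tasks: the driver enables and disables SPTPRAC from the CPU around clock bring-up and tear-down, and drains the bus through RBBM_GPR0_CNTL instead of the GBIF/VBIF halt registers. The snippet below is a minimal sketch of just that halt-and-ack step, reusing the driver's gpu_write()/gpu_read()/spin_until() helpers and the defines added by this patch; the function name is hypothetical and the code is an illustration, not part of the patch.

/*
 * Hypothetical helper, for illustration only: mirrors the holi branch
 * added to a6xx_bus_clear_pending_transactions() above.
 */
static void a619_holi_drain_bus(struct msm_gpu *gpu)
{
	/* Request a GBIF halt through the GPR0 control register */
	gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);

	/* Poll until every ack bit covered by VBIF_RESET_ACK_MASK (0xF0) is set */
	spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
		    VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK);
}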