#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/i915_drm.h>

#define DP_RECEIVER_CAP_SIZE	0xf
#define DP_LINK_STATUS_SIZE	6
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
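/* An eDP panel can hang either off the PCH (PCH eDP) or off the CPU's
 * DP-A port (CPU eDP); is_cpu_edp() reports the latter by combining
 * the is_edp() and is_pch_edp() checks below. */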
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
	intel_dp = enc_to_intel_dp(encoder);
	switch (max_lane_count) {
	case 1: case 2: case 4:
		break;
	default:
		max_lane_count = 4;
	}
	return max_lane_count;
intel_dp_max_link_bw(struct intel_dp *intel_dp)

	switch (max_link_bw) {
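/* The two rate helpers below work in matching 1/10 units: the required
 * rate is pixel_clock (kHz) * bpp / 10, and the available rate is
 * link_clock (kHz) * lanes * 8 / 10, since 8b/10b encoding leaves 8
 * data bits per 10-bit symbol. The "+ 9" rounds the division up. */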
	return (pixel_clock * bpp + 9) / 10;
intel_dp_max_data_rate(int max_link_clock, int max_lanes)

	return (max_link_clock * max_lanes * 8) / 10;
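/* If a mode doesn't fit at 24 bpp, intel_dp_adjust_dithering() retries
 * the bandwidth math at 18 bpp, which the hardware can dither down to. */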
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)

	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = intel_dp_max_lane_count(intel_dp);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;
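/* AUX data registers hold up to four payload bytes each, most
 * significant byte first; pack_aux()/unpack_aux() convert between that
 * register layout and plain byte arrays. */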
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);

	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
intel_dp_check_edp(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	if (!ironlake_edp_have_panel_power(intel_dp) &&
	    !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)

	uint32_t ch_ctl = output_reg + 0x10;
	intel_dp_check_edp(intel_dp);
	if (is_cpu_edp(intel_dp)) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;
	for (try = 0; try < 3; try++) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);

		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);

		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);
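/* Native AUX requests start with a four byte header: the command in
 * the high nibble of byte 0, the DPCD address split across bytes 1-2,
 * and the transfer length minus one in byte 3. Write payloads follow
 * the header; replies carry an ack byte before any data. */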
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;

	ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)

	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	reply_bytes = recv_bytes + 1;

	ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
			      reply, reply_bytes);

	memcpy(recv, reply + 1, ret - 1);
	intel_dp_check_edp(intel_dp);

	msg[1] = address >> 8;
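/* I2C-over-AUX replies carry two status fields: the native AUX reply
 * (ack/nack/defer) and, only when that is an ack, the tunneled I2C
 * reply. Defers at either level are retried, up to five attempts with
 * a 100us pause, before the transfer is failed. */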
	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';

	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
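	/* Registering the adapter can trigger AUX transactions, so the
	 * panel VDD is forced on around the call (a best-effort reading
	 * of the bracketing below). */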
	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;

		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
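	/* Walk the link configurations from lowest bandwidth upward
	 * (each link rate, then 1/2/4 lanes) and pick the first one
	 * whose available rate covers the mode. */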
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
					      "count %d clock %d bpp %d\n",
					      intel_dp->link_bw, intel_dp->lane_count,
					      adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
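/* The transcoder needs the pixel-to-link clock relationship programmed
 * as M/N pairs, each value at most 24 bits wide (hence the halving
 * loop above). GMCH M/N covers the data rate (pixel_clock * bpp / 8
 * versus link_clock * lanes); link M/N covers the raw clock ratio. */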
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)

	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	}
#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
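/* Each mask/value pair above describes a panel power sequencer state
 * as seen in PCH_PP_STATUS; ironlake_wait_panel_status() polls the
 * register until (status & mask) == value. */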
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask, u32 value)

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);

	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);

		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
	}
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp,
						 panel_vdd_work);

	ironlake_panel_vdd_off_sync(intel_dp);
}
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);

	if (sync)
		ironlake_panel_vdd_off_sync(intel_dp);
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);

	ironlake_wait_panel_on(intel_dp);
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	pp = ironlake_get_pp_control(dev_priv);

	ironlake_wait_panel_off(intel_dp);
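/* The backlight helpers toggle the backlight-enable bit in the panel
 * power control register; they are sequenced after panel power-on and
 * before power-off so the screen never lights up without an image. */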
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	pp = ironlake_get_pp_control(dev_priv);
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	pp = ironlake_get_pp_control(dev_priv);
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)

	DRM_DEBUG_KMS("\n");
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)

	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)

	ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
					  DP_SET_POWER_D3);
	if (ret != 1)
		DRM_DEBUG_DRIVER("failed to write sink power state\n");

	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_write_1(intel_dp,
						  DP_SET_POWER,
						  DP_SET_POWER_D0);
		if (ret == 1)
			break;
		msleep(1);
	}
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)

	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);

	ironlake_edp_panel_off(intel_dp);

	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}
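/* Enable order matters: VDD comes up first so link training can talk
 * to the sink, the panel is powered before training completes, VDD is
 * released once real panel power is on, and the backlight goes on only
 * after the link is trained. */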
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)

	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

static bool
intel_dp_get_link_status(struct intel_dp *intel_dp,
			 uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}
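/* During training the sink reports the drive settings it wants in the
 * DPCD adjust-request bytes: two lanes per byte, each lane using two
 * bits of voltage swing plus two bits of pre-emphasis. The helpers
 * below pull out one lane's request and shift it into the DP_TRAIN_*
 * field positions. */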
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
				 int lane)

	int s = ((lane & 1) ?
		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
				      int lane)

	int s = ((lane & 1) ?
		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];
static char	*voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char	*pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char	*link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
intel_dp_voltage_max(struct intel_dp *intel_dp)

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
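/* intel_get_adjust_train() takes the highest voltage swing and
 * pre-emphasis requested across all active lanes, clamps them to the
 * platform maximum (setting the MAX_*_REACHED flags so the sink stops
 * asking for more), and applies the result to every lane. */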
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
intel_dp_signal_levels(uint8_t train_set)

	return signal_levels;
intel_gen6_edp_signal_levels(uint8_t train_set)

	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
intel_gen7_edp_signal_levels(uint8_t train_set)

	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
1624 int s = (lane & 1) * 4;
1627 return (l >> s) & 0xf;
	for (lane = 0; lane < lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & DP_LANE_CR_DONE) == 0)
			return false;
	}
	return true;
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
			 DP_LANE_CHANNEL_EQ_DONE|\
			 DP_LANE_SYMBOL_LOCKED)
intel_channel_eq_ok(struct intel_dp *intel_dp,
		    uint8_t link_status[DP_LINK_STATUS_SIZE])

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
			return false;
	}
	return true;
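/* intel_dp_set_link_train() programs the requested training pattern on
 * both ends of the link: the pattern bits go into the port register on
 * the source, and the matching DP_TRAINING_PATTERN_SET plus per-lane
 * DP_TRAINING_LANE0_SET drive values are written to the sink's DPCD. */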
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)

		DRM_ERROR("DP training pattern 3 not supported\n");

		DRM_ERROR("DP training pattern 3 not supported\n");

	intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
					intel_dp->train_set,
					intel_dp->lane_count);
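/* Clock recovery (training pattern 1): transmit TP1, poll the sink's
 * link status, and bump voltage swing / pre-emphasis as the sink
 * requests. Training gives up after five tries at the same voltage, or
 * five full loops at maximum voltage. */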
intel_dp_start_link_train(struct intel_dp *intel_dp)

	bool clock_recovery = false;
	int voltage_tries, loop_tries;

	clock_recovery = false;

		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);

		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		if (i == intel_dp->lane_count && voltage_tries == 5) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
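/* Channel equalization (training pattern 2): keep adjusting drive
 * settings until every lane reports CR done, channel EQ done and
 * symbol lock. If clock recovery is lost the code falls back to
 * pattern 1; after five failed EQ tries it retrains from scratch. */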
intel_dp_complete_link_train(struct intel_dp *intel_dp)

	bool channel_eq = false;
	int tries, cr_tries;

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			return;
		}

		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
		}

		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (intel_channel_eq_ok(intel_dp, link_status)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
intel_dp_link_down(struct intel_dp *intel_dp)

	DRM_DEBUG_KMS("\n");
intel_dp_get_dpcd(struct intel_dp *intel_dp)

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */
intel_dp_probe_oui(struct intel_dp *intel_dp)

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
intel_dp_handle_test_request(struct intel_dp *intel_dp)
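/* intel_dp_check_link_status() runs on hotplug interrupts: it bails if
 * the link is idle, drops the link if the sink stopped responding,
 * acknowledges any sink IRQ (including automated test requests), and
 * retrains when channel equalization has been lost. */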
intel_dp_check_link_status(struct intel_dp *intel_dp)

	if (!intel_dp->base.connectors_active)
		return;

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!intel_channel_eq_ok(intel_dp, link_status)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
intel_dp_detect_dpcd(struct intel_dp *intel_dp)

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
					    &reg, 1))
		return connector_status_unknown;

	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
ironlake_dp_detect(struct intel_dp *intel_dp)

	if (is_edp(intel_dp)) {

	return intel_dp_detect_dpcd(intel_dp);
g4x_dp_detect(struct intel_dp *intel_dp)

	return intel_dp_detect_dpcd(intel_dp);
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)

	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (is_edp(intel_dp)) {
		if (!intel_dp->edid)
			return NULL;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (is_edp(intel_dp)) {
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	struct edid *edid = NULL;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	intel_dp_probe_oui(intel_dp);

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);

	if (is_edp(intel_dp)) {
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	has_audio = intel_dp_detect_audio(connector);
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		intel_set_mode(crtc, &crtc->mode,
			       crtc->x, crtc->y, crtc->fb);
	}
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (is_edp(intel_dp))
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		ironlake_panel_vdd_off_sync(intel_dp);
	}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
};

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.detect = intel_dp_detect,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
	struct intel_dp *intel_dp = container_of(intel_encoder,
						 struct intel_dp, base);

	intel_dp_check_link_status(intel_dp);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_dp_add_properties(struct intel_dp *intel_dp,
			struct drm_connector *connector)
	struct intel_dp *intel_dp;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}

	intel_encoder = &intel_dp->base;

	if (output_reg == DP_A || is_pch_edp(intel_dp)) {

	connector = &intel_connector->base;

	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_encoder->enable = intel_enable_dp;
	intel_encoder->pre_enable = intel_pre_enable_dp;
	intel_encoder->disable = intel_disable_dp;
	if (is_edp(intel_dp)) {

		u32 pp_on, pp_off, pp_div;

		if (!pp_on || !pp_off || !pp_div) {
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}
	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay,
		      intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
	intel_dp_i2c_init(intel_dp, intel_connector, name);
	if (is_edp(intel_dp)) {

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (!ret) {
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}
		ironlake_edp_panel_vdd_on(intel_dp);

		intel_dp->edid = edid;

		ironlake_edp_panel_vdd_off(intel_dp, false);
	intel_encoder->hot_plug = intel_dp_hot_plug;
	if (is_edp(intel_dp)) {

	intel_dp_add_properties(intel_dp, connector);