// wlroots-hyprland/backend/drm/atomic.c — DRM atomic-modesetting backend
#include <gbm.h>
#include <stdlib.h>
#include <wlr/util/log.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include "backend/drm/drm.h"
#include "backend/drm/iface.h"
#include "backend/drm/util.h"
// State for building one atomic request. `failed` is sticky: once an
// allocation or property-add error occurs, all later operations on this
// atom become no-ops and the commit is reported as failed.
struct atomic {
	drmModeAtomicReq *req;  // request being built (owned by the CRTC)
	int cursor;             // rollback point captured at atomic_begin()
	bool failed;            // set on any error while building the request
};
static void atomic_begin(struct wlr_drm_crtc *crtc, struct atomic *atom) {
if (!crtc->atomic) {
crtc->atomic = drmModeAtomicAlloc();
if (!crtc->atomic) {
2018-07-09 23:49:54 +02:00
wlr_log_errno(WLR_ERROR, "Allocation failed");
2017-08-09 10:43:01 +02:00
atom->failed = true;
return;
}
}
atom->req = crtc->atomic;
atom->cursor = drmModeAtomicGetCursor(atom->req);
atom->failed = false;
}
// Dry-run the built request with DRM_MODE_ATOMIC_TEST_ONLY so a bad
// configuration is rejected without touching the hardware. On failure,
// rewinds the request to the cursor captured by atomic_begin().
static bool atomic_end(int drm_fd, uint32_t flags, struct atomic *atom) {
	if (atom->failed) {
		return false;
	}

	int ret = drmModeAtomicCommit(drm_fd, atom->req,
		flags | DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	if (ret != 0) {
		wlr_log_errno(WLR_DEBUG, "Atomic test failed");
		drmModeAtomicSetCursor(atom->req, atom->cursor);
		return false;
	}
	return true;
}
// Submit the built request for real. The backend pointer is passed as
// the commit's user data so the page-flip event handler can recover it.
// The request is emptied afterwards regardless of the outcome.
static bool atomic_commit(int drm_fd, struct atomic *atom,
		struct wlr_drm_connector *conn, uint32_t flags, bool modeset) {
	struct wlr_drm_backend *drm =
		get_drm_backend_from_backend(conn->output.backend);
	if (atom->failed) {
		return false;
	}

	int ret = drmModeAtomicCommit(drm_fd, atom->req, flags, drm);
	if (ret != 0) {
		wlr_log_errno(WLR_ERROR, "%s: Atomic commit failed (%s)",
			conn->output.name, modeset ? "modeset" : "pageflip");
	}

	// Reset the request so the next frame starts from a clean slate.
	drmModeAtomicSetCursor(atom->req, 0);
	return ret == 0;
}
// Append one property assignment to the request. A no-op once the atom
// has failed; an add error makes the failure sticky.
static void atomic_add(struct atomic *atom, uint32_t id, uint32_t prop, uint64_t val) {
	if (atom->failed) {
		return;
	}
	if (drmModeAtomicAddProperty(atom->req, id, prop, val) < 0) {
		wlr_log_errno(WLR_ERROR, "Failed to add atomic DRM property");
		atom->failed = true;
	}
}
// Pack the CRTC's gamma table into a GAMMA_LUT property blob.
//
// On success, stores the new blob ID in *blob_id (0 when the table is
// empty, meaning "no blob") and returns true. Returns false on
// allocation or blob-creation failure. The caller owns the blob ID and
// is responsible for destroying a previous one.
static bool create_gamma_lut_blob(struct wlr_drm_backend *drm,
		struct wlr_drm_crtc *crtc, uint32_t *blob_id) {
	if (crtc->gamma_table_size == 0) {
		*blob_id = 0;
		return true;
	}

	uint32_t size = crtc->gamma_table_size;
	// The table is stored as three consecutive channel arrays: R, G, B.
	uint16_t *r = crtc->gamma_table;
	uint16_t *g = crtc->gamma_table + size;
	uint16_t *b = crtc->gamma_table + 2 * size;

	// calloc: checks the size multiplication for overflow, and zeroes the
	// struct drm_color_lut `reserved` field, which malloc left
	// uninitialized before passing the buffer to the kernel.
	struct drm_color_lut *gamma = calloc(size, sizeof(*gamma));
	if (gamma == NULL) {
		wlr_log(WLR_ERROR, "Failed to allocate gamma table");
		return false;
	}

	for (uint32_t i = 0; i < size; ++i) {
		gamma[i].red = r[i];
		gamma[i].green = g[i];
		gamma[i].blue = b[i];
	}

	// Single exit path: free the staging buffer whether or not the blob
	// was created.
	bool ok = drmModeCreatePropertyBlob(drm->fd, gamma,
		size * sizeof(*gamma), blob_id) == 0;
	if (!ok) {
		wlr_log_errno(WLR_ERROR, "Unable to create property blob");
	}
	free(gamma);
	return ok;
}
// Turn a plane off: clear its framebuffer and detach it from any CRTC.
static void plane_disable(struct atomic *atom, struct wlr_drm_plane *plane) {
	const union wlr_drm_plane_props *props = &plane->props;
	atomic_add(atom, plane->id, props->fb_id, 0);
	atomic_add(atom, plane->id, props->crtc_id, 0);
}
// Attach the plane's next framebuffer to the given CRTC at (x, y),
// scanning out the full surface. Marks the atom failed if the buffer
// cannot be acquired or has no DRM framebuffer ID.
static void set_plane_props(struct atomic *atom, struct wlr_drm_backend *drm,
		struct wlr_drm_plane *plane, uint32_t crtc_id, int32_t x, int32_t y) {
	struct wlr_drm_fb *fb = plane_get_next_fb(plane);
	struct gbm_bo *bo = drm_fb_acquire(fb, drm, &plane->mgpu_surf);
	if (bo == NULL) {
		atom->failed = true;
		return;
	}

	uint32_t fb_id = get_fb_for_bo(bo, drm->addfb2_modifiers);
	if (fb_id == 0) {
		atom->failed = true;
		return;
	}

	uint32_t id = plane->id;
	const union wlr_drm_plane_props *props = &plane->props;

	// The src_* properties are in 16.16 fixed point
	atomic_add(atom, id, props->src_x, 0);
	atomic_add(atom, id, props->src_y, 0);
	atomic_add(atom, id, props->src_w, (uint64_t)plane->surf.width << 16);
	atomic_add(atom, id, props->src_h, (uint64_t)plane->surf.height << 16);
	atomic_add(atom, id, props->crtc_w, plane->surf.width);
	atomic_add(atom, id, props->crtc_h, plane->surf.height);
	atomic_add(atom, id, props->fb_id, fb_id);
	atomic_add(atom, id, props->crtc_id, crtc_id);
	atomic_add(atom, id, props->crtc_x, (uint64_t)x);
	atomic_add(atom, id, props->crtc_y, (uint64_t)y);
}
// Build and submit a full atomic commit for this connector's CRTC:
// mode blob, gamma LUT, primary plane, and (if present) cursor plane.
// Returns false if blob creation, the test commit, or the real commit
// fails.
static bool atomic_crtc_commit(struct wlr_drm_backend *drm,
		struct wlr_drm_connector *conn, uint32_t flags) {
	struct wlr_drm_crtc *crtc = conn->crtc;

	bool modeset = crtc->pending & WLR_DRM_CRTC_MODE;
	if (modeset) {
		// Replace the mode blob with one describing the pending mode
		if (crtc->mode_id != 0) {
			drmModeDestroyPropertyBlob(drm->fd, crtc->mode_id);
		}
		if (drmModeCreatePropertyBlob(drm->fd, &crtc->mode,
				sizeof(drmModeModeInfo), &crtc->mode_id)) {
			wlr_log_errno(WLR_ERROR, "Unable to create mode property blob");
			return false;
		}
	}

	if (crtc->pending & WLR_DRM_CRTC_GAMMA_LUT) {
		// Fallback to legacy gamma interface when gamma properties are not
		// available (can happen on older Intel GPUs that support gamma but
		// not degamma).
		if (crtc->props.gamma_lut == 0) {
			if (!drm_legacy_crtc_set_gamma(drm, crtc)) {
				return false;
			}
		} else {
			if (crtc->gamma_lut != 0) {
				drmModeDestroyPropertyBlob(drm->fd, crtc->gamma_lut);
			}
			if (!create_gamma_lut_blob(drm, crtc, &crtc->gamma_lut)) {
				return false;
			}
		}
	}

	if (modeset) {
		flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	} else {
		flags |= DRM_MODE_ATOMIC_NONBLOCK;
	}

	// Zero-initialize so atom.req stays NULL if atomic_begin() fails;
	// the error path below must not touch an uninitialized pointer.
	struct atomic atom = {0};

	atomic_begin(crtc, &atom);
	atomic_add(&atom, conn->id, conn->props.crtc_id, crtc->id);
	if (modeset && conn->props.link_status != 0) {
		atomic_add(&atom, conn->id, conn->props.link_status,
			DRM_MODE_LINK_STATUS_GOOD);
	}
	atomic_add(&atom, crtc->id, crtc->props.mode_id, crtc->mode_id);
	atomic_add(&atom, crtc->id, crtc->props.active, 1);
	if (crtc->props.gamma_lut != 0) {
		// Only set GAMMA_LUT when the property exists: the legacy
		// fallback above already applied the table otherwise, and a
		// property ID of 0 would make the whole request fail.
		atomic_add(&atom, crtc->id, crtc->props.gamma_lut, crtc->gamma_lut);
	}
	set_plane_props(&atom, drm, crtc->primary, crtc->id, 0, 0);
	if (crtc->cursor) {
		if (crtc->cursor->cursor_enabled) {
			set_plane_props(&atom, drm, crtc->cursor, crtc->id,
				conn->cursor_x, conn->cursor_y);
		} else {
			plane_disable(&atom, crtc->cursor);
		}
	}

	if (!atomic_end(drm->fd, modeset ? DRM_MODE_ATOMIC_ALLOW_MODESET : 0,
			&atom)) {
		if (atom.req != NULL) {
			drmModeAtomicSetCursor(atom.req, 0);
		}
		return false;
	}

	if (!atomic_commit(drm->fd, &atom, conn, flags, modeset)) {
		return false;
	}

	if (crtc->cursor) {
		drm_fb_move(&crtc->cursor->queued_fb, &crtc->cursor->pending_fb);
	}
	return true;
}
// Enable or disable a connector via a blocking modeset. A connector
// without a CRTC is already off, so disabling it succeeds trivially and
// enabling it fails.
static bool atomic_conn_enable(struct wlr_drm_backend *drm,
		struct wlr_drm_connector *conn, bool enable) {
	struct wlr_drm_crtc *crtc = conn->crtc;
	if (crtc == NULL) {
		return !enable;
	}

	struct atomic atom;
	atomic_begin(crtc, &atom);
	atomic_add(&atom, crtc->id, crtc->props.active, enable);
	// Link connector and mode blob to the CRTC when enabling;
	// clear both when disabling.
	uint64_t conn_crtc_id = enable ? crtc->id : 0;
	uint64_t mode_blob_id = enable ? crtc->mode_id : 0;
	atomic_add(&atom, conn->id, conn->props.crtc_id, conn_crtc_id);
	atomic_add(&atom, crtc->id, crtc->props.mode_id, mode_blob_id);

	return atomic_commit(drm->fd, &atom, conn, DRM_MODE_ATOMIC_ALLOW_MODESET,
		true);
}
// Nothing to do here: cursor plane state is folded into the atomic
// request submitted on the next page-flip (see atomic_crtc_commit).
static bool atomic_crtc_set_cursor(struct wlr_drm_backend *drm,
		struct wlr_drm_crtc *crtc, struct gbm_bo *bo) {
	return true;
}
static size_t atomic_crtc_get_gamma_size(struct wlr_drm_backend *drm,
struct wlr_drm_crtc *crtc) {
if (crtc->props.gamma_lut_size == 0) {
return legacy_iface.crtc_get_gamma_size(drm, crtc);
}
uint64_t gamma_lut_size;
if (!get_drm_prop(drm->fd, crtc->id, crtc->props.gamma_lut_size,
&gamma_lut_size)) {
2018-07-09 23:49:54 +02:00
wlr_log(WLR_ERROR, "Unable to get gamma lut size");
return 0;
}
return (size_t)gamma_lut_size;
}
const struct wlr_drm_interface atomic_iface = {
2017-08-09 10:43:01 +02:00
.conn_enable = atomic_conn_enable,
.crtc_commit = atomic_crtc_commit,
2017-08-09 10:43:01 +02:00
.crtc_set_cursor = atomic_crtc_set_cursor,
.crtc_get_gamma_size = atomic_crtc_get_gamma_size,
2017-08-09 10:43:01 +02:00
};