This commit is contained in:
2025-12-28 13:55:10 -08:00
commit 9b4219aa67
131 changed files with 32853 additions and 0 deletions

702
src/args_parser.c Normal file
View File

@ -0,0 +1,702 @@
#include "../include/args_parser.h"
#include "../include/defs.h"
#include "../include/egl.h"
#include "../include/window/window.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <limits.h>
#include <assert.h>
#include <errno.h>
#include <libgen.h>
#include <sys/stat.h>
#ifndef GSR_VERSION
#define GSR_VERSION "unknown"
#endif
/* Name -> value tables used by ARG_TYPE_ENUM arguments. The `name` strings are
 * the exact values users pass on the command line (e.g. "-k hevc"). */

/* -k: video codec. Note "h265" and "hevc" are aliases for the same codec. */
static const ArgEnum video_codec_enums[] = {
{ .name = "auto", .value = GSR_VIDEO_CODEC_AUTO },
{ .name = "h264", .value = GSR_VIDEO_CODEC_H264 },
{ .name = "h265", .value = GSR_VIDEO_CODEC_HEVC },
{ .name = "hevc", .value = GSR_VIDEO_CODEC_HEVC },
{ .name = "hevc_hdr", .value = GSR_VIDEO_CODEC_HEVC_HDR },
{ .name = "hevc_10bit", .value = GSR_VIDEO_CODEC_HEVC_10BIT },
{ .name = "av1", .value = GSR_VIDEO_CODEC_AV1 },
{ .name = "av1_hdr", .value = GSR_VIDEO_CODEC_AV1_HDR },
{ .name = "av1_10bit", .value = GSR_VIDEO_CODEC_AV1_10BIT },
{ .name = "vp8", .value = GSR_VIDEO_CODEC_VP8 },
{ .name = "vp9", .value = GSR_VIDEO_CODEC_VP9 },
};
/* -ac: audio codec (flac is currently remapped to opus in args_parser_set_values). */
static const ArgEnum audio_codec_enums[] = {
{ .name = "opus", .value = GSR_AUDIO_CODEC_OPUS },
{ .name = "aac", .value = GSR_AUDIO_CODEC_AAC },
{ .name = "flac", .value = GSR_AUDIO_CODEC_FLAC },
};
/* -encoder: hardware (gpu) vs software (cpu) video encoding. */
static const ArgEnum video_encoder_enums[] = {
{ .name = "gpu", .value = GSR_VIDEO_ENCODER_HW_GPU },
{ .name = "cpu", .value = GSR_VIDEO_ENCODER_HW_CPU },
};
/* -pixfmt: output pixel format. */
static const ArgEnum pixel_format_enums[] = {
{ .name = "yuv420", .value = GSR_PIXEL_FORMAT_YUV420 },
{ .name = "yuv444", .value = GSR_PIXEL_FORMAT_YUV444 },
};
/* -fm: framerate mode. */
static const ArgEnum framerate_mode_enums[] = {
{ .name = "vfr", .value = GSR_FRAMERATE_MODE_VARIABLE },
{ .name = "cfr", .value = GSR_FRAMERATE_MODE_CONSTANT },
{ .name = "content", .value = GSR_FRAMERATE_MODE_CONTENT },
};
/* -bm: bitrate mode ("auto" picks qp or vbr later based on the GPU). */
static const ArgEnum bitrate_mode_enums[] = {
{ .name = "auto", .value = GSR_BITRATE_MODE_AUTO },
{ .name = "qp", .value = GSR_BITRATE_MODE_QP },
{ .name = "cbr", .value = GSR_BITRATE_MODE_CBR },
{ .name = "vbr", .value = GSR_BITRATE_MODE_VBR },
};
/* -cr: color range. */
static const ArgEnum color_range_enums[] = {
{ .name = "limited", .value = GSR_COLOR_RANGE_LIMITED },
{ .name = "full", .value = GSR_COLOR_RANGE_FULL },
};
/* -tune: encoder tuning preference. */
static const ArgEnum tune_enums[] = {
{ .name = "performance", .value = GSR_TUNE_PERFORMANCE },
{ .name = "quality", .value = GSR_TUNE_QUALITY },
};
/* -replay-storage: where the replay buffer is kept. */
static const ArgEnum replay_storage_enums[] = {
{ .name = "ram", .value = GSR_REPLAY_STORAGE_RAM },
{ .name = "disk", .value = GSR_REPLAY_STORAGE_DISK },
};
/* Releases the value list owned by an Arg and resets it to an empty state.
 * free(NULL) is a no-op, so no NULL guard is needed. The counters are also
 * cleared so the Arg is safe to reuse or deinit twice. */
static void arg_deinit(Arg *arg) {
    free(arg->values);
    arg->values = NULL;
    arg->num_values = 0;
    arg->capacity_num_values = 0;
}
/* Appends `value` (pointer stored, not copied) to the arg's value list,
 * growing the backing array geometrically when full.
 * Returns false on allocation failure (the existing list is left intact,
 * since realloc's result is checked before overwriting arg->values). */
static bool arg_append_value(Arg *arg, const char *value) {
    /* Grow only when actually full; the previous `num_values + 1 >= capacity`
       check reallocated one element early and wasted a slot. */
    if(arg->num_values >= arg->capacity_num_values) {
        const int new_capacity_num_values = arg->capacity_num_values == 0 ? 4 : arg->capacity_num_values*2;
        /* Do the size multiplication in size_t to avoid int overflow. */
        void *new_data = realloc(arg->values, (size_t)new_capacity_num_values * sizeof(const char*));
        if(!new_data)
            return false;
        arg->values = new_data;
        arg->capacity_num_values = new_capacity_num_values;
    }
    arg->values[arg->num_values] = value;
    ++arg->num_values;
    return true;
}
static bool arg_get_enum_value_by_name(const Arg *arg, const char *name, int *enum_value) {
assert(arg->type == ARG_TYPE_ENUM);
assert(arg->enum_values);
for(int i = 0; i < arg->num_enum_values; ++i) {
if(strcmp(arg->enum_values[i].name, name) == 0) {
*enum_value = arg->enum_values[i].value;
return true;
}
}
return false;
}
/* Prints the arg's accepted enum names to stderr as a human-readable list:
 * 'a', 'b' or 'c' (no trailing newline). Used when reporting a bad value. */
static void arg_print_expected_enum_names(const Arg *arg) {
    assert(arg->type == ARG_TYPE_ENUM);
    assert(arg->enum_values);
    const int last_index = arg->num_enum_values - 1;
    for(int i = 0; i < arg->num_enum_values; ++i) {
        const char *separator = "";
        if(i > 0)
            separator = (i == last_index) ? " or " : ", ";
        fprintf(stderr, "%s'%s'", separator, arg->enum_values[i].name);
    }
}
/* Linear search for the Arg whose key matches `key`. Returns NULL if absent. */
static Arg* args_get_by_key(Arg *args, int num_args, const char *key) {
    Arg *const end = args + num_args;
    for(Arg *arg = args; arg != end; ++arg) {
        if(strcmp(arg->key, key) == 0)
            return arg;
    }
    return NULL;
}
/* Returns the first raw string value given for `key`, or NULL if the key is
 * unknown or the option was never supplied on the command line. */
static const char* args_get_value_by_key(Arg *args, int num_args, const char *key) {
    for(int i = 0; i < num_args; ++i) {
        if(strcmp(args[i].key, key) != 0)
            continue;
        return args[i].num_values > 0 ? args[i].values[0] : NULL;
    }
    return NULL;
}
/* Returns the parsed boolean for `key`, or `default_value` when the option
 * was not supplied. The key must exist in the registered arg table. */
static bool args_get_boolean_by_key(Arg *args, int num_args, const char *key, bool default_value) {
    Arg *arg = args_get_by_key(args, num_args, key);
    assert(arg);
    if(arg->num_values == 0)
        return default_value;
    assert(arg->type == ARG_TYPE_BOOLEAN);
    return arg->typed_value.boolean;
}
/* Returns the parsed enum value for `key`, or `default_value` when the option
 * was not supplied. The key must exist in the registered arg table. */
static int args_get_enum_by_key(Arg *args, int num_args, const char *key, int default_value) {
    Arg *arg = args_get_by_key(args, num_args, key);
    assert(arg);
    if(arg->num_values == 0)
        return default_value;
    assert(arg->type == ARG_TYPE_ENUM);
    return arg->typed_value.enum_value;
}
/* Returns the parsed 64-bit integer for `key`, or `default_value` when the
 * option was not supplied. The key must exist in the registered arg table. */
static int64_t args_get_i64_by_key(Arg *args, int num_args, const char *key, int64_t default_value) {
    Arg *arg = args_get_by_key(args, num_args, key);
    assert(arg);
    if(arg->num_values == 0)
        return default_value;
    assert(arg->type == ARG_TYPE_I64);
    return arg->typed_value.i64_value;
}
/* Returns the parsed double for `key`, or `default_value` when the option
 * was not supplied. The key must exist in the registered arg table. */
static double args_get_double_by_key(Arg *args, int num_args, const char *key, double default_value) {
    Arg *arg = args_get_by_key(args, num_args, key);
    assert(arg);
    if(arg->num_values == 0)
        return default_value;
    assert(arg->type == ARG_TYPE_DOUBLE);
    return arg->typed_value.d_value;
}
/* Prints the one-line usage summary to stdout.
 * When running inside flatpak (detected via the FLATPAK_ID environment
 * variable) the program name is replaced with the full "flatpak run"
 * invocation so the printed command line is directly copy-pasteable. */
static void usage_header(void) {
    const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
    const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
    printf("usage: %s -w <window_id|monitor|focused|portal|region|v4l2_device_path> [-c <container_format>] [-s WxH] [-region WxH+X+Y] [-f <fps>] [-a <audio_input>] "
        "[-q <quality>] [-r <replay_buffer_size_sec>] [-replay-storage ram|disk] [-restart-replay-on-save yes|no] "
        "[-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] "
        "[-bm auto|qp|vbr|cbr] [-cr limited|full] [-tune performance|quality] [-df yes|no] [-sc <script_path>] [-p <plugin_path>] "
        "[-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] "
        "[-fallback-cpu-encoding yes|no] [-o <output_file>] [-ro <output_directory>] [--list-capture-options [card_path]] [--list-audio-devices] "
        "[--list-application-audio] [--list-v4l2-devices] [-v yes|no] [-gl-debug yes|no] [--version] [-h|--help]\n", program_name);
    fflush(stdout);
}
/* Prints the usage summary followed by a pointer to the man page.
 * Inside flatpak the man page lives under the flatpak install prefix, so the
 * full path is printed instead of the plain man page name. */
static void usage_full(void) {
    const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
    usage_header();
    printf("\n");
    printf("NOTES:\n");
    if(inside_flatpak)
        printf("  Run \"man /var/lib/flatpak/app/com.dec05eba.gpu_screen_recorder/current/active/files/share/man/man1/gpu-screen-recorder.1\" to open the man page for GPU Screen Recorder to see an explanation for each option and examples\n");
    else
        printf("  Run \"man gpu-screen-recorder.1\" to open the man page for GPU Screen Recorder to see an explanation for each option and examples\n");
    fflush(stdout);
}
/* Short usage output used after argument errors (header line only,
 * without the man-page notes that usage_full adds). */
static void usage(void) {
    usage_header();
}
// TODO: Does this match all livestreaming cases?
/* Returns true when `str` starts with a streaming protocol scheme
 * (http, https, rtmp, rtmps, rtsp, srt, tcp or udp). */
static bool is_livestream_path(const char *str) {
    static const char *protocol_prefixes[] = {
        "http://", "https://",
        "rtmp://", "rtmps://",
        "rtsp://",
        "srt://", "tcp://", "udp://",
    };
    const size_t len = strlen(str);
    for(size_t i = 0; i < sizeof(protocol_prefixes)/sizeof(protocol_prefixes[0]); ++i) {
        const size_t prefix_len = strlen(protocol_prefixes[i]);
        if(len >= prefix_len && memcmp(str, protocol_prefixes[i], prefix_len) == 0)
            return true;
    }
    return false;
}
/* Converts the raw string values collected in self->args into the typed
 * fields of the args_parser and validates constraints that span multiple
 * options (e.g. -q is mandatory with "-bm cbr", -c/-o requirements in
 * replay mode). Prints an error (usually followed by the usage line) and
 * returns false on any invalid combination; returns true on success. */
static bool args_parser_set_values(args_parser *self) {
    /* Enum-typed options with their defaults. */
    self->video_encoder = (gsr_video_encoder_hardware)args_get_enum_by_key(self->args, NUM_ARGS, "-encoder", GSR_VIDEO_ENCODER_HW_GPU);
    self->pixel_format = (gsr_pixel_format)args_get_enum_by_key(self->args, NUM_ARGS, "-pixfmt", GSR_PIXEL_FORMAT_YUV420);
    self->framerate_mode = (gsr_framerate_mode)args_get_enum_by_key(self->args, NUM_ARGS, "-fm", GSR_FRAMERATE_MODE_VARIABLE);
    self->color_range = (gsr_color_range)args_get_enum_by_key(self->args, NUM_ARGS, "-cr", GSR_COLOR_RANGE_LIMITED);
    self->tune = (gsr_tune)args_get_enum_by_key(self->args, NUM_ARGS, "-tune", GSR_TUNE_PERFORMANCE);
    self->video_codec = (gsr_video_codec)args_get_enum_by_key(self->args, NUM_ARGS, "-k", GSR_VIDEO_CODEC_AUTO);
    self->audio_codec = (gsr_audio_codec)args_get_enum_by_key(self->args, NUM_ARGS, "-ac", GSR_AUDIO_CODEC_OPUS);
    self->bitrate_mode = (gsr_bitrate_mode)args_get_enum_by_key(self->args, NUM_ARGS, "-bm", GSR_BITRATE_MODE_AUTO);
    self->replay_storage = (gsr_replay_storage)args_get_enum_by_key(self->args, NUM_ARGS, "-replay-storage", GSR_REPLAY_STORAGE_RAM);
    /* String and boolean options. */
    self->capture_source = args_get_value_by_key(self->args, NUM_ARGS, "-w");
    self->verbose = args_get_boolean_by_key(self->args, NUM_ARGS, "-v", true);
    self->gl_debug = args_get_boolean_by_key(self->args, NUM_ARGS, "-gl-debug", false);
    self->record_cursor = args_get_boolean_by_key(self->args, NUM_ARGS, "-cursor", true);
    self->date_folders = args_get_boolean_by_key(self->args, NUM_ARGS, "-df", false);
    self->restore_portal_session = args_get_boolean_by_key(self->args, NUM_ARGS, "-restore-portal-session", false);
    self->restart_replay_on_save = args_get_boolean_by_key(self->args, NUM_ARGS, "-restart-replay-on-save", false);
    self->overclock = args_get_boolean_by_key(self->args, NUM_ARGS, "-oc", false);
    self->fallback_cpu_encoding = args_get_boolean_by_key(self->args, NUM_ARGS, "-fallback-cpu-encoding", false);
    /* -ab is given in kbps on the command line; convert to bits per second. */
    self->audio_bitrate = args_get_i64_by_key(self->args, NUM_ARGS, "-ab", 0);
    self->audio_bitrate *= 1000LL;
    self->keyint = args_get_double_by_key(self->args, NUM_ARGS, "-keyint", 2.0);
    /* flac is accepted on the command line but currently remapped to opus. */
    if(self->audio_codec == GSR_AUDIO_CODEC_FLAC) {
        fprintf(stderr, "gsr warning: flac audio codec is temporary disabled, using opus audio codec instead\n");
        self->audio_codec = GSR_AUDIO_CODEC_OPUS;
    }
    /* Reject a token filepath ending in '/', which would denote a directory. */
    self->portal_session_token_filepath = args_get_value_by_key(self->args, NUM_ARGS, "-portal-session-token-filepath");
    if(self->portal_session_token_filepath) {
        int len = strlen(self->portal_session_token_filepath);
        if(len > 0 && self->portal_session_token_filepath[len - 1] == '/') {
            fprintf(stderr, "gsr error: -portal-session-token-filepath should be a path to a file but it ends with a /: %s\n", self->portal_session_token_filepath);
            return false;
        }
    }
    /* The -sc script must exist, be a regular file and be executable by the owner. */
    self->recording_saved_script = args_get_value_by_key(self->args, NUM_ARGS, "-sc");
    if(self->recording_saved_script) {
        struct stat buf;
        if(stat(self->recording_saved_script, &buf) == -1 || !S_ISREG(buf.st_mode)) {
            fprintf(stderr, "gsr error: Script \"%s\" either doesn't exist or it's not a file\n", self->recording_saved_script);
            usage();
            return false;
        }
        if(!(buf.st_mode & S_IXUSR)) {
            fprintf(stderr, "gsr error: Script \"%s\" is not executable\n", self->recording_saved_script);
            usage();
            return false;
        }
    }
    /* -q means a bitrate (kbps) in cbr mode, a named quality level otherwise. */
    const char *quality_str = args_get_value_by_key(self->args, NUM_ARGS, "-q");
    self->video_quality = GSR_VIDEO_QUALITY_VERY_HIGH;
    self->video_bitrate = 0;
    if(self->bitrate_mode == GSR_BITRATE_MODE_CBR) {
        if(!quality_str) {
            fprintf(stderr, "gsr error: option '-q' is required when using '-bm cbr' option\n");
            usage();
            return false;
        }
        if(sscanf(quality_str, "%" PRIi64, &self->video_bitrate) != 1) {
            fprintf(stderr, "gsr error: -q argument \"%s\" is not an integer value. When using '-bm cbr' option '-q' is expected to be an integer value\n", quality_str);
            usage();
            return false;
        }
        if(self->video_bitrate < 0) {
            fprintf(stderr, "gsr error: -q is expected to be 0 or larger, got %" PRIi64 "\n", self->video_bitrate);
            usage();
            return false;
        }
        /* kbps -> bps. */
        self->video_bitrate *= 1000LL;
    } else {
        if(!quality_str)
            quality_str = "very_high";
        if(strcmp(quality_str, "medium") == 0) {
            self->video_quality = GSR_VIDEO_QUALITY_MEDIUM;
        } else if(strcmp(quality_str, "high") == 0) {
            self->video_quality = GSR_VIDEO_QUALITY_HIGH;
        } else if(strcmp(quality_str, "very_high") == 0) {
            self->video_quality = GSR_VIDEO_QUALITY_VERY_HIGH;
        } else if(strcmp(quality_str, "ultra") == 0) {
            self->video_quality = GSR_VIDEO_QUALITY_ULTRA;
        } else {
            fprintf(stderr, "gsr error: -q should either be 'medium', 'high', 'very_high' or 'ultra', got: '%s'\n", quality_str);
            usage();
            return false;
        }
    }
    /* -s WxH output scaling; {0,0} means "no scaling requested". */
    self->output_resolution = (vec2i){0, 0};
    const char *output_resolution_str = args_get_value_by_key(self->args, NUM_ARGS, "-s");
    if(output_resolution_str) {
        if(sscanf(output_resolution_str, "%dx%d", &self->output_resolution.x, &self->output_resolution.y) != 2) {
            fprintf(stderr, "gsr error: invalid value for option -s '%s', expected a value in format WxH\n", output_resolution_str);
            usage();
            return false;
        }
        if(self->output_resolution.x < 0 || self->output_resolution.y < 0) {
            fprintf(stderr, "gsr error: invalid value for option -s '%s', expected width and height to be greater or equal to 0\n", output_resolution_str);
            usage();
            return false;
        }
    }
    /* -region WxH+X+Y capture region; {0,0} means "not set". */
    self->region_size = (vec2i){0, 0};
    self->region_position = (vec2i){0, 0};
    const char *region_str = args_get_value_by_key(self->args, NUM_ARGS, "-region");
    if(region_str) {
        if(sscanf(region_str, "%dx%d+%d+%d", &self->region_size.x, &self->region_size.y, &self->region_position.x, &self->region_position.y) != 4) {
            fprintf(stderr, "gsr error: invalid value for option -region '%s', expected a value in format WxH+X+Y\n", region_str);
            usage();
            return false;
        }
        if(self->region_size.x < 0 || self->region_size.y < 0) {
            fprintf(stderr, "gsr error: invalid value for option -region '%s', expected width and height to be greater or equal to 0\n", region_str);
            usage();
            return false;
        }
    }
    self->fps = args_get_i64_by_key(self->args, NUM_ARGS, "-f", 60);
    /* -r not given => -1 => replay mode disabled. */
    self->replay_buffer_size_secs = args_get_i64_by_key(self->args, NUM_ARGS, "-r", -1);
    if(self->replay_buffer_size_secs != -1)
        self->replay_buffer_size_secs += (int64_t)(self->keyint + 0.5); // Add a few seconds to account for packets lost when skipping non-keyframe packets
    self->container_format = args_get_value_by_key(self->args, NUM_ARGS, "-c");
    /* "mkv" is the common name; ffmpeg calls the muxer "matroska". */
    if(self->container_format && strcmp(self->container_format, "mkv") == 0)
        self->container_format = "matroska";
    const bool is_replaying = self->replay_buffer_size_secs != -1;
    self->is_livestream = false;
    self->filename = args_get_value_by_key(self->args, NUM_ARGS, "-o");
    if(self->filename) {
        self->is_livestream = is_livestream_path(self->filename);
        if(self->is_livestream) {
            /* Replay mode buffers to disk/ram; it makes no sense for a live stream. */
            if(is_replaying) {
                fprintf(stderr, "gsr error: replay mode is not applicable to live streaming\n");
                return false;
            }
        } else {
            if(!is_replaying) {
                /* Regular recording: make sure the output file's directory exists.
                   dirname() may modify its argument, so work on a copy. */
                char directory_buf[PATH_MAX];
                snprintf(directory_buf, sizeof(directory_buf), "%s", self->filename);
                char *directory = dirname(directory_buf);
                if(strcmp(directory, ".") != 0 && strcmp(directory, "/") != 0) {
                    if(create_directory_recursive(directory) != 0) {
                        fprintf(stderr, "gsr error: failed to create directory for output file: %s\n", self->filename);
                        return false;
                    }
                }
            } else {
                /* Replay mode: -o is a directory and -c is mandatory since the
                   container can't be derived from a filename extension. */
                if(!self->container_format) {
                    fprintf(stderr, "gsr error: option -c is required when using option -r\n");
                    usage();
                    return false;
                }
                struct stat buf;
                if(stat(self->filename, &buf) != -1 && !S_ISDIR(buf.st_mode)) {
                    fprintf(stderr, "gsr error: File \"%s\" exists but it's not a directory\n", self->filename);
                    usage();
                    return false;
                }
            }
        }
    } else {
        /* No -o: write to stdout (piping), which requires an explicit -c.
           Replay mode can't work without an output directory. */
        if(!is_replaying) {
            self->filename = "/dev/stdout";
        } else {
            fprintf(stderr, "gsr error: Option -o is required when using option -r\n");
            usage();
            return false;
        }
        if(!self->container_format) {
            fprintf(stderr, "gsr error: option -c is required when not using option -o\n");
            usage();
            return false;
        }
    }
    self->is_output_piped = strcmp(self->filename, "/dev/stdout") == 0;
    /* Low-latency encoding for streaming and piped output. */
    self->low_latency_recording = self->is_livestream || self->is_output_piped;
    self->replay_recording_directory = args_get_value_by_key(self->args, NUM_ARGS, "-ro");
    /* The "recording saved" script has no meaning for a live stream. */
    if(self->is_livestream && self->recording_saved_script) {
        fprintf(stderr, "gsr warning: live stream detected, -sc script is ignored\n");
        self->recording_saved_script = NULL;
    }
    return true;
}
/* Parses the command line into `self`.
 * First handles the standalone "mode" flags (--help, --info, --list-*,
 * --version) by dispatching to `arg_handlers` and returning true immediately.
 * Otherwise registers the full option table, consumes argv as key/value
 * pairs, type-checks each value, then finishes via args_parser_set_values().
 * Returns false on any parse error (an error + usage text is printed). */
bool args_parser_parse(args_parser *self, int argc, char **argv, const args_handlers *arg_handlers, void *userdata) {
    assert(arg_handlers);
    memset(self, 0, sizeof(*self));
    if(argc <= 1) {
        usage_full();
        return false;
    }
    if(argc == 2 && (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0)) {
        usage_full();
        return false;
    }
    /* Standalone modes: run the handler and stop; no recording options allowed. */
    if(argc == 2 && strcmp(argv[1], "--info") == 0) {
        arg_handlers->info(userdata);
        return true;
    }
    if(argc == 2 && strcmp(argv[1], "--list-audio-devices") == 0) {
        arg_handlers->list_audio_devices(userdata);
        return true;
    }
    if(argc == 2 && strcmp(argv[1], "--list-application-audio") == 0) {
        arg_handlers->list_application_audio(userdata);
        return true;
    }
    if(argc == 2 && strcmp(argv[1], "--list-v4l2-devices") == 0) {
        arg_handlers->list_v4l2_devices(userdata);
        return true;
    }
    /* --list-capture-options takes an optional card path argument. */
    if(strcmp(argv[1], "--list-capture-options") == 0) {
        if(argc == 2) {
            arg_handlers->list_capture_options(NULL, userdata);
            return true;
        } else if(argc == 3 || argc == 4) {
            const char *card_path = argv[2];
            arg_handlers->list_capture_options(card_path, userdata);
            return true;
        } else {
            fprintf(stderr, "gsr error: expected --list-capture-options to be called with either no extra arguments or 1 extra argument (card path)\n");
            return false;
        }
    }
    if(argc == 2 && strcmp(argv[1], "--version") == 0) {
        arg_handlers->version(userdata);
        return true;
    }
    /* Register every recording option. The order here defines the layout of
       self->args; the assert below ensures the table stays in sync with NUM_ARGS. */
    int arg_index = 0;
    self->args[arg_index++] = (Arg){ .key = "-w", .optional = false, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-c", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-f", .optional = true, .list = false, .type = ARG_TYPE_I64, .integer_value_min = 1, .integer_value_max = 1000 };
    self->args[arg_index++] = (Arg){ .key = "-s", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-region", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-a", .optional = true, .list = true, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-q", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-o", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-ro", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-r", .optional = true, .list = false, .type = ARG_TYPE_I64, .integer_value_min = 2, .integer_value_max = 86400 };
    self->args[arg_index++] = (Arg){ .key = "-restart-replay-on-save", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-k", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = video_codec_enums, .num_enum_values = sizeof(video_codec_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-ac", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = audio_codec_enums, .num_enum_values = sizeof(audio_codec_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-ab", .optional = true, .list = false, .type = ARG_TYPE_I64, .integer_value_min = 0, .integer_value_max = 50000 };
    self->args[arg_index++] = (Arg){ .key = "-oc", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-fm", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = framerate_mode_enums, .num_enum_values = sizeof(framerate_mode_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-bm", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = bitrate_mode_enums, .num_enum_values = sizeof(bitrate_mode_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-pixfmt", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = pixel_format_enums, .num_enum_values = sizeof(pixel_format_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-v", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-gl-debug", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-df", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-sc", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-cr", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = color_range_enums, .num_enum_values = sizeof(color_range_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-tune", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = tune_enums, .num_enum_values = sizeof(tune_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-cursor", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-keyint", .optional = true, .list = false, .type = ARG_TYPE_DOUBLE, .integer_value_min = 0, .integer_value_max = 500 };
    self->args[arg_index++] = (Arg){ .key = "-restore-portal-session", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-portal-session-token-filepath", .optional = true, .list = false, .type = ARG_TYPE_STRING };
    self->args[arg_index++] = (Arg){ .key = "-encoder", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = video_encoder_enums, .num_enum_values = sizeof(video_encoder_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-fallback-cpu-encoding", .optional = true, .list = false, .type = ARG_TYPE_BOOLEAN };
    self->args[arg_index++] = (Arg){ .key = "-replay-storage", .optional = true, .list = false, .type = ARG_TYPE_ENUM, .enum_values = replay_storage_enums, .num_enum_values = sizeof(replay_storage_enums)/sizeof(ArgEnum) };
    self->args[arg_index++] = (Arg){ .key = "-p", .optional = true, .list = true, .type = ARG_TYPE_STRING };
    assert(arg_index == NUM_ARGS);
    /* Consume argv as "-key value" pairs (step of 2). */
    for(int i = 1; i < argc; i += 2) {
        const char *arg_name = argv[i];
        Arg *arg = args_get_by_key(self->args, NUM_ARGS, arg_name);
        if(!arg) {
            fprintf(stderr, "gsr error: invalid argument '%s'\n", arg_name);
            usage();
            return false;
        }
        /* Only .list args (-a, -p) may appear more than once. */
        if(arg->num_values > 0 && !arg->list) {
            fprintf(stderr, "gsr error: expected argument '%s' to only be specified once\n", arg_name);
            usage();
            return false;
        }
        if(i + 1 >= argc) {
            fprintf(stderr, "gsr error: missing value for argument '%s'\n", arg_name);
            usage();
            return false;
        }
        const char *arg_value = argv[i + 1];
        /* Validate and cache the typed value; the raw string is also appended below. */
        switch(arg->type) {
            case ARG_TYPE_STRING: {
                break;
            }
            case ARG_TYPE_BOOLEAN: {
                if(strcmp(arg_value, "yes") == 0) {
                    arg->typed_value.boolean = true;
                } else if(strcmp(arg_value, "no") == 0) {
                    arg->typed_value.boolean = false;
                } else {
                    fprintf(stderr, "gsr error: %s should either be 'yes' or 'no', got: '%s'\n", arg_name, arg_value);
                    usage();
                    return false;
                }
                break;
            }
            case ARG_TYPE_ENUM: {
                if(!arg_get_enum_value_by_name(arg, arg_value, &arg->typed_value.enum_value)) {
                    fprintf(stderr, "gsr error: %s should either be ", arg_name);
                    arg_print_expected_enum_names(arg);
                    fprintf(stderr, ", got: '%s'\n", arg_value);
                    usage();
                    return false;
                }
                break;
            }
            case ARG_TYPE_I64: {
                if(sscanf(arg_value, "%" PRIi64, &arg->typed_value.i64_value) != 1) {
                    fprintf(stderr, "gsr error: %s argument \"%s\" is not an integer\n", arg_name, arg_value);
                    usage();
                    return false;
                }
                if(arg->typed_value.i64_value < arg->integer_value_min) {
                    fprintf(stderr, "gsr error: %s argument is expected to be larger than %" PRIi64 ", got %" PRIi64 "\n", arg_name, arg->integer_value_min, arg->typed_value.i64_value);
                    usage();
                    return false;
                }
                if(arg->typed_value.i64_value > arg->integer_value_max) {
                    fprintf(stderr, "gsr error: %s argument is expected to be less than %" PRIi64 ", got %" PRIi64 "\n", arg_name, arg->integer_value_max, arg->typed_value.i64_value);
                    usage();
                    return false;
                }
                break;
            }
            case ARG_TYPE_DOUBLE: {
                /* Note: doubles are range-checked against the integer min/max fields. */
                if(sscanf(arg_value, "%lf", &arg->typed_value.d_value) != 1) {
                    fprintf(stderr, "gsr error: %s argument \"%s\" is not an floating-point number\n", arg_name, arg_value);
                    usage();
                    return false;
                }
                if(arg->typed_value.d_value < arg->integer_value_min) {
                    fprintf(stderr, "gsr error: %s argument is expected to be larger than %" PRIi64 ", got %lf\n", arg_name, arg->integer_value_min, arg->typed_value.d_value);
                    usage();
                    return false;
                }
                if(arg->typed_value.d_value > arg->integer_value_max) {
                    fprintf(stderr, "gsr error: %s argument is expected to be less than %" PRIi64 ", got %lf\n", arg_name, arg->integer_value_max, arg->typed_value.d_value);
                    usage();
                    return false;
                }
                break;
            }
        }
        if(!arg_append_value(arg, arg_value)) {
            fprintf(stderr, "gsr error: failed to append argument, out of memory\n");
            return false;
        }
    }
    /* Check that all mandatory options (currently only -w) were given. */
    for(int i = 0; i < NUM_ARGS; ++i) {
        const Arg *arg = &self->args[i];
        if(!arg->optional && arg->num_values == 0) {
            fprintf(stderr, "gsr error: missing argument '%s'\n", arg->key);
            usage();
            return false;
        }
    }
    return args_parser_set_values(self);
}
/* Frees the per-argument value lists owned by the parser. */
void args_parser_deinit(args_parser *self) {
    Arg *arg = self->args;
    Arg *const end = arg + NUM_ARGS;
    while(arg != end)
        arg_deinit(arg++);
}
/* Second validation pass, run once GPU/display-server info is available.
 * Resolves "auto" bitrate mode, applies driver-specific workarounds
 * (Steam Deck, NVIDIA/Wayland overclock, very old NVIDIA GPUs) and rejects
 * HDR codecs on X11. Returns false only for the HDR-on-X11 case; all other
 * adjustments are silent fixups with a warning/info message. */
bool args_parser_validate_with_gl_info(args_parser *self, gsr_egl *egl) {
    const bool wayland = gsr_window_get_display_server(egl->window) == GSR_DISPLAY_SERVER_WAYLAND;
    if(self->bitrate_mode == (gsr_bitrate_mode)GSR_BITRATE_MODE_AUTO) {
        // QP is broken on steam deck, see https://github.com/ValveSoftware/SteamOS/issues/1609
        self->bitrate_mode = egl->gpu_info.is_steam_deck ? GSR_BITRATE_MODE_VBR : GSR_BITRATE_MODE_QP;
    }
    /* Also covers explicitly requested qp on Steam Deck, not just "auto". */
    if(egl->gpu_info.is_steam_deck && self->bitrate_mode == GSR_BITRATE_MODE_QP) {
        fprintf(stderr, "gsr warning: qp bitrate mode is not supported on Steam Deck because of Steam Deck driver bugs. Using vbr instead\n");
        self->bitrate_mode = GSR_BITRATE_MODE_VBR;
    }
    if(self->video_encoder == GSR_VIDEO_ENCODER_HW_CPU && self->bitrate_mode == GSR_BITRATE_MODE_VBR) {
        fprintf(stderr, "gsr warning: bitrate mode has been forcefully set to qp because software encoding option doesn't support vbr option\n");
        self->bitrate_mode = GSR_BITRATE_MODE_QP;
    }
    /* Overclock (-oc) only applies to NVIDIA on X11. */
    if(egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA && self->overclock) {
        fprintf(stderr, "gsr info: overclock option has no effect on amd/intel, ignoring option\n");
        self->overclock = false;
    }
    if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && self->overclock && wayland) {
        fprintf(stderr, "gsr info: overclocking is not possible on nvidia on wayland, ignoring option\n");
        self->overclock = false;
    }
    if(egl->gpu_info.is_steam_deck) {
        fprintf(stderr, "gsr warning: steam deck has multiple driver issues. One of them has been reported here: https://github.com/ValveSoftware/SteamOS/issues/1609\n"
            "If you have issues with GPU Screen Recorder on steam deck that you don't have on a desktop computer then report the issue to Valve and/or AMD.\n");
    }
    /* NVIDIA GPUs older than Maxwell (version < 900, 0 = unknown) get a lower preset. */
    self->very_old_gpu = false;
    if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && egl->gpu_info.gpu_version != 0 && egl->gpu_info.gpu_version < 900) {
        fprintf(stderr, "gsr info: your gpu appears to be very old (older than maxwell architecture). Switching to lower preset\n");
        self->very_old_gpu = true;
    }
    if(video_codec_is_hdr(self->video_codec) && !wayland) {
        fprintf(stderr, "gsr error: hdr video codec option %s is not available on X11\n", video_codec_to_string(self->video_codec));
        usage();
        return false;
    }
    return true;
}
/* Public wrapper so other translation units can print the usage line. */
void args_parser_print_usage(void) {
    usage();
}
/* Public accessor: returns the Arg registered for `arg_name` (e.g. "-a"),
 * or NULL if no such option exists. Used by callers that need the raw
 * value list of list-type options. */
Arg* args_parser_get_arg(args_parser *self, const char *arg_name) {
    return args_get_by_key(self->args, NUM_ARGS, arg_name);
}

53
src/capture/capture.c Normal file
View File

@ -0,0 +1,53 @@
#include "../../include/capture/capture.h"
#include <assert.h>
/* Starts the capture backend. Must not already be started.
 * Returns the backend's result code (0 = success); the started flag is
 * only set on success. */
int gsr_capture_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    assert(!cap->started);
    const int result = cap->start(cap, capture_metadata);
    if(result == 0)
        cap->started = true;
    return result;
}
/* Per-frame tick; optional for backends (no-op when the hook is NULL). */
void gsr_capture_tick(gsr_capture *cap) {
    assert(cap->started);
    if(!cap->tick)
        return;
    cap->tick(cap);
}
/* Forwards window-system events to the backend, if it handles them. */
void gsr_capture_on_event(gsr_capture *cap, gsr_egl *egl) {
    if(!cap->on_event)
        return;
    cap->on_event(cap, egl);
}
/* Asks the backend whether capture should stop. Backends without the hook
 * never request a stop. `err` is backend-defined output. */
bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
    assert(cap->started);
    return cap->should_stop ? cap->should_stop(cap, err) : false;
}
/* Captures one frame via the backend. The capture hook is mandatory
 * (called unconditionally). Returns the backend's result code. */
int gsr_capture_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    assert(cap->started);
    return cap->capture(cap, capture_metadata, color_conversion);
}
/* Whether the backend samples from an external (OES) image.
 * Defaults to false when the backend doesn't implement the hook. */
bool gsr_capture_uses_external_image(gsr_capture *cap) {
    return cap->uses_external_image ? cap->uses_external_image(cap) : false;
}
/* Fills the FFmpeg HDR metadata structs from the backend, if supported.
 * Returns false when the backend has no HDR metadata hook. */
bool gsr_capture_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
    return cap->set_hdr_metadata ? cap->set_hdr_metadata(cap, mastering_display_metadata, light_metadata) : false;
}
/* Destroys the backend. The destroy hook is mandatory and owns all cleanup,
 * including freeing `cap` itself if the backend allocated it. */
void gsr_capture_destroy(gsr_capture *cap) {
    cap->destroy(cap);
}

738
src/capture/kms.c Normal file
View File

@ -0,0 +1,738 @@
#include "../../include/capture/kms.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
#include "../../include/cursor.h"
#include "../../include/window/window.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <drm_fourcc.h>
#include <libavutil/mastering_display_metadata.h>
#define FIND_CRTC_BY_NAME_TIMEOUT_SECONDS 2.0
#define HDMI_STATIC_METADATA_TYPE1 0
#define HDMI_EOTF_SMPTE_ST2084 2
#define MAX_CONNECTOR_IDS 32
/* DRM connector ids belonging to one logical monitor selection.
 * Filled by monitor_callback; capped at MAX_CONNECTOR_IDS entries. */
typedef struct {
    uint32_t connector_ids[MAX_CONNECTOR_IDS];
    int num_connector_ids;
} MonitorId;
/* Private state for the KMS capture backend. */
typedef struct {
    gsr_capture_kms_params params;
    vec2i capture_pos;                      // capture origin within the plane
    vec2i capture_size;
    MonitorId monitor_id;                   // connector ids of the captured monitor
    gsr_monitor_rotation monitor_rotation;
    unsigned int input_texture_id;          // GL_TEXTURE_2D for imported plane
    unsigned int external_input_texture_id; // GL_TEXTURE_EXTERNAL_OES variant
    unsigned int cursor_texture_id;
    bool no_modifiers_fallback;             // assume: retry import without DRM modifiers — TODO confirm
    bool external_texture_fallback;         // assume: retry import as external texture — TODO confirm
    struct hdr_output_metadata hdr_metadata;
    bool hdr_metadata_set;
    bool is_x11;
    //int drm_fd;
    //uint64_t prev_sequence;
    //bool damaged;
    vec2i prev_target_pos;
    vec2i prev_plane_size;
    double last_time_monitor_check;         // used with FIND_CRTC_BY_NAME_TIMEOUT_SECONDS, presumably for periodic re-detection — verify in tick code
    bool capture_is_combined_plane;
    gsr_kms_response_item *drm_fd;          // NOTE(review): repurposed name — now a kms response item, not a file descriptor
    vec2i output_size;
    vec2i target_pos;
} gsr_capture_kms;
/* Deletes the backend's GL textures (input, external input and cursor)
 * and clears the stored ids so a later stop/start is safe. */
static void gsr_capture_kms_stop(gsr_capture_kms *self) {
    unsigned int *texture_ids[] = {
        &self->input_texture_id,
        &self->external_input_texture_id,
        &self->cursor_texture_id,
    };
    const size_t num_texture_ids = sizeof(texture_ids)/sizeof(texture_ids[0]);
    for(size_t i = 0; i < num_texture_ids; ++i) {
        if(*texture_ids[i] == 0)
            continue;
        self->params.egl->glDeleteTextures(1, texture_ids[i]);
        *texture_ids[i] = 0;
    }
}
/* Returns the larger of two ints. */
static int max_int(int a, int b) {
    if(a > b)
        return a;
    return b;
}
/* Creates the three GL textures used by this backend with linear filtering:
 * a GL_TEXTURE_2D input texture, a GL_TEXTURE_EXTERNAL_OES input texture,
 * and a cursor texture whose target depends on the GPU vendor
 * (external on NVIDIA, 2D otherwise). */
static void gsr_capture_kms_create_input_texture_ids(gsr_capture_kms *self) {
    self->params.egl->glGenTextures(1, &self->input_texture_id);
    self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture_id);
    self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
    self->params.egl->glGenTextures(1, &self->external_input_texture_id);
    self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->external_input_texture_id);
    self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
    /* NVIDIA requires an external (OES) target for the cursor texture. */
    const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
    const int cursor_texture_id_target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
    self->params.egl->glGenTextures(1, &self->cursor_texture_id);
    self->params.egl->glBindTexture(cursor_texture_id_target, self->cursor_texture_id);
    self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->params.egl->glBindTexture(cursor_texture_id_target, 0);
}
/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
/* State passed to monitor_callback while enumerating active monitor outputs. */
typedef struct {
    MonitorId *monitor_id;          // Out: receives the connector ids of the monitor that matches monitor_to_capture
    const char *monitor_to_capture; // Monitor name to match (compared with memcmp using the length below, not NUL-terminated)
    int monitor_to_capture_len;     // Length of monitor_to_capture in bytes
    int num_monitors;               // Out: total number of monitors seen during enumeration
} MonitorCallbackUserdata;
static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
MonitorCallbackUserdata *monitor_callback_userdata = userdata;
++monitor_callback_userdata->num_monitors;
if(monitor_callback_userdata->monitor_to_capture_len != monitor->name_len || memcmp(monitor_callback_userdata->monitor_to_capture, monitor->name, monitor->name_len) != 0)
return;
if(monitor_callback_userdata->monitor_id->num_connector_ids < MAX_CONNECTOR_IDS) {
monitor_callback_userdata->monitor_id->connector_ids[monitor_callback_userdata->monitor_id->num_connector_ids] = monitor->connector_id;
++monitor_callback_userdata->monitor_id->num_connector_ids;
}
if(monitor_callback_userdata->monitor_id->num_connector_ids == MAX_CONNECTOR_IDS)
fprintf(stderr, "gsr warning: reached max connector ids\n");
}
/* Swaps width and height when the monitor is rotated 90 or 270 degrees,
 * otherwise returns the size unchanged. */
static vec2i rotate_capture_size_if_rotated(gsr_capture_kms *self, vec2i capture_size) {
    switch(self->monitor_rotation) {
        case GSR_MONITOR_ROT_90:
        case GSR_MONITOR_ROT_270: {
            const vec2i swapped = { capture_size.y, capture_size.x };
            return swapped;
        }
        default:
            return capture_size;
    }
}
/* Starts kms capture: creates the input/cursor textures, resolves the target
 * monitor (by drm connector on wayland, by x11 output on x11) and fills
 * capture_metadata->video_size with the size of the output video.
 * Returns 0 on success, -1 if the monitor could not be found. */
static int gsr_capture_kms_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_kms *self = cap->priv;

    gsr_capture_kms_create_input_texture_ids(self);

    gsr_monitor monitor;
    self->monitor_id.num_connector_ids = 0;

    self->is_x11 = gsr_window_get_display_server(self->params.egl->window) == GSR_DISPLAY_SERVER_X11;
    const gsr_connection_type connection_type = self->is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;

    // Collect the drm connector ids that belong to the monitor we want to capture
    MonitorCallbackUserdata monitor_callback_userdata = {
        &self->monitor_id,
        self->params.display_to_capture, strlen(self->params.display_to_capture),
        0,
    };
    for_each_active_monitor_output(self->params.egl->window, self->params.egl->card_path, connection_type, monitor_callback, &monitor_callback_userdata);

    if(!get_monitor_by_name(self->params.egl, connection_type, self->params.display_to_capture, &monitor)) {
        fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
        gsr_capture_kms_stop(self);
        return -1;
    }

    monitor.name = self->params.display_to_capture;
    vec2i monitor_position = {0, 0};
    drm_monitor_get_display_server_data(self->params.egl->window, &monitor, &self->monitor_rotation, &monitor_position);

    self->capture_pos = monitor.pos;
    /* Monitor size is already rotated on x11 when the monitor is rotated, no need to apply it ourselves */
    if(self->is_x11)
        self->capture_size = monitor.size;
    else
        self->capture_size = rotate_capture_size_if_rotated(self, monitor.size);

    // Output size priority: explicit output resolution (scaled to keep aspect ratio)
    // > region size > full monitor size
    if(self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0) {
        self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
        capture_metadata->video_size = self->params.output_resolution;
    } else if(self->params.region_size.x > 0 && self->params.region_size.y > 0) {
        capture_metadata->video_size = self->params.region_size;
    } else {
        capture_metadata->video_size = self->capture_size;
    }

    self->last_time_monitor_check = clock_get_monotonic_seconds();
    return 0;
}
// TODO: This is disabled for now because we want to be able to record at a framerate higher than the monitor framerate
// static void gsr_capture_kms_tick(gsr_capture *cap) {
// gsr_capture_kms *self = cap->priv;
// if(self->drm_fd <= 0)
// self->drm_fd = open(self->params.egl->card_path, O_RDONLY);
// if(self->drm_fd <= 0)
// return;
// uint64_t sequence = 0;
// uint64_t ns = 0;
// if(drmCrtcGetSequence(self->drm_fd, 79, &sequence, &ns) != 0)
// return;
// if(sequence != self->prev_sequence) {
// self->prev_sequence = sequence;
// self->damaged = true;
// }
// }
/* Finds the non-cursor plane that belongs to the given drm connector.
 * Returns NULL when no such plane is in the response. */
static gsr_kms_response_item* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
    const int num_items = kms_response->num_items;
    for(int i = 0; i < num_items; ++i) {
        gsr_kms_response_item *item = &kms_response->items[i];
        if(item->is_cursor)
            continue;
        if(item->connector_id == connector_id)
            return item;
    }
    return NULL;
}
/* Finds the non-cursor plane with the largest area (width*height).
 * Returns NULL when the response contains no non-cursor planes.
 * Fix: the previous version initialized the result to &items[0] before any
 * cursor check, so a cursor plane could be returned (e.g. when every item is a
 * cursor, or item 0 is a cursor and all non-cursor planes have zero area). */
static gsr_kms_response_item* find_largest_drm(gsr_kms_response *kms_response) {
    gsr_kms_response_item *largest_drm = NULL;
    int64_t largest_size = -1;
    for(int i = 0; i < kms_response->num_items; ++i) {
        gsr_kms_response_item *item = &kms_response->items[i];
        if(item->is_cursor)
            continue;
        const int64_t size = (int64_t)item->width * (int64_t)item->height;
        if(size > largest_size) {
            largest_size = size;
            largest_drm = item;
        }
    }
    return largest_drm;
}
/* Finds the cursor plane for the given connector. When no cursor plane matches
 * the connector, the last cursor plane seen is returned as a fallback
 * (NULL when the response has no cursor planes at all). */
static gsr_kms_response_item* find_cursor_drm(gsr_kms_response *kms_response, uint32_t connector_id) {
    gsr_kms_response_item *fallback = NULL;
    for(int i = 0; i < kms_response->num_items; ++i) {
        gsr_kms_response_item *item = &kms_response->items[i];
        if(!item->is_cursor)
            continue;
        if(item->connector_id == connector_id)
            return item;
        fallback = item;
    }
    return fallback;
}
/* Returns true when the hdr metadata is static metadata type 1 with the
 * SMPTE ST 2084 (PQ) transfer function — the only format handled here. */
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
    if(hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1)
        return false;
    if(hdr_metadata->hdmi_metadata_type1.metadata_type != HDMI_STATIC_METADATA_TYPE1)
        return false;
    return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
}
// TODO: Check if this hdr data can be changed after the call to av_packet_side_data_add
/* Caches the first hdr metadata seen; once set, later frames keep the initial metadata. */
static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
    if(!self->hdr_metadata_set) {
        self->hdr_metadata = drm_fd->hdr_metadata;
        self->hdr_metadata_set = true;
    }
}
/* Returns the vector with its x and y components exchanged. */
static vec2i swap_vec2i(vec2i value) {
    const vec2i swapped = { value.y, value.x };
    return swapped;
}
/* Imports the dma-bufs of |drm_fd| as an EGLImage via EGL_LINUX_DMA_BUF_EXT.
 * When use_modifiers is false the modifier attributes are omitted from the
 * attribute list. Returns NULL on failure. */
static EGLImage gsr_capture_kms_create_egl_image(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
    // 44 entries: assumed large enough for the attributes of all planes
    // filled by setup_dma_buf_attrs — TODO confirm against its implementation
    intptr_t img_attr[44];
    setup_dma_buf_attrs(img_attr, drm_fd->pixel_format, drm_fd->width, drm_fd->height, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs, use_modifiers);
    // Drain stale EGL errors so the check below only reflects eglCreateImage
    while(self->params.egl->eglGetError() != EGL_SUCCESS){}
    EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
    if(!image || self->params.egl->eglGetError() != EGL_SUCCESS) {
        if(image)
            self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
        return NULL;
    }
    return image;
}
/* Imports the plane's dma-bufs as an EGLImage, first with drm format modifiers
 * and, if that fails, without them. The fallback is sticky: once modifiers
 * fail, all later imports skip them. Returns NULL if both attempts fail. */
static EGLImage gsr_capture_kms_create_egl_image_with_fallback(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
    // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
    // Even ffmpeg kmsgrab causes this crash. The error is "amdgpu: Failed to allocate a buffer" followed by
    // "radeon_create_encoder UVD - Can't create CPB buffer" and the vaapi encoder failing with a resource
    // allocation / input-output error, after which the kms client shuts down.
    int fds[GSR_KMS_MAX_DMA_BUFS];
    uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
    uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
    uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];

    // Flatten the per-plane dma-buf data into the parallel arrays eglCreateImage needs
    for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
        fds[i] = drm_fd->dma_buf[i].fd;
        offsets[i] = drm_fd->dma_buf[i].offset;
        pitches[i] = drm_fd->dma_buf[i].pitch;
        modifiers[i] = drm_fd->modifier;
    }

    if(!self->no_modifiers_fallback) {
        EGLImage image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, true);
        if(image)
            return image;
        fprintf(stderr, "gsr error: gsr_capture_kms_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
        self->no_modifiers_fallback = true;
    }
    return gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
}
/* Binds the EGLImage to the given texture (2D or external OES).
 * Returns true when glEGLImageTargetTexture2DOES raised no GL error. */
static bool gsr_capture_kms_bind_image_to_texture(gsr_capture_kms *self, EGLImage image, unsigned int texture_id, bool external_texture) {
    const int target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
    // Drain stale GL errors so the check below only reflects the bind
    while(self->params.egl->glGetError() != 0){}
    self->params.egl->glBindTexture(target, texture_id);
    self->params.egl->glEGLImageTargetTexture2DOES(target, image);
    const bool bound_ok = self->params.egl->glGetError() == 0;
    self->params.egl->glBindTexture(target, 0);
    return bound_ok;
}
/* Binds the image to the regular 2D input texture; if that fails, switches
 * permanently to the external (OES) input texture. */
static void gsr_capture_kms_bind_image_to_input_texture_with_fallback(gsr_capture_kms *self, EGLImage image) {
    if(!self->external_texture_fallback) {
        if(gsr_capture_kms_bind_image_to_texture(self, image, self->input_texture_id, false))
            return;
        fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to bind image to texture, trying with external texture\n");
        self->external_texture_fallback = true;
    }
    gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
}
/* Finds the plane that belongs to the captured monitor by trying each of its
 * connector ids. On x11 (where the monitor may share one combined framebuffer)
 * falls back to the largest plane and sets *capture_is_combined_plane. */
static gsr_kms_response_item* find_monitor_drm(gsr_capture_kms *self, bool *capture_is_combined_plane) {
    *capture_is_combined_plane = false;
    for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
        gsr_kms_response_item *drm_fd = find_drm_by_connector_id(self->params.kms_response, self->monitor_id.connector_ids[i]);
        if(drm_fd)
            return drm_fd;
    }
    // Will never happen on wayland unless the target monitor has been disconnected
    if(!self->is_x11)
        return NULL;
    *capture_is_combined_plane = true;
    return find_largest_drm(self->params.kms_response);
}
/* Finds the cursor plane for the monitor. A cursor plane from another
 * connector is only accepted when capturing the combined plane. */
static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *self, uint32_t monitor_connector_id, bool capture_is_combined_plane) {
    gsr_kms_response_item *cursor_drm_fd = find_cursor_drm(self->params.kms_response, monitor_connector_id);
    if(!cursor_drm_fd)
        return NULL;
    if(!capture_is_combined_plane && cursor_drm_fd->connector_id != monitor_connector_id)
        return NULL;
    return cursor_drm_fd;
}
/* Converts a kms plane rotation to a monitor rotation.
 * Right now both enums have the same values so this is a plain cast;
 * revisit if either enum ever changes. */
static gsr_monitor_rotation kms_rotation_to_gsr_monitor_rotation(gsr_kms_rotation rotation) {
    return (gsr_monitor_rotation)rotation;
}
/* Truncated remainder of a/b; identical to C's % operator
 * (a - (a/b)*b == a % b for all b != 0, since C division truncates toward zero). */
static int remainder_int(int a, int b) {
    return a % b;
}
/* Returns the rotation difference rot1 - rot2 normalized to a valid rotation.
 * Fix: the plain truncated remainder is negative when rot2 > rot1 (e.g.
 * ROT_0 - ROT_90 gave -1), which is not a valid gsr_monitor_rotation and fell
 * through the rotation switch in render_drm_cursor. Biasing by a full turn
 * before taking the remainder keeps the result in [0, 3]. */
static gsr_monitor_rotation sub_rotations(gsr_monitor_rotation rot1, gsr_monitor_rotation rot2) {
    return remainder_int((int)rot1 - (int)rot2 + 4, 4);
}
/* Imports the drm cursor plane as a texture and draws it on top of the captured
 * frame, compensating for the rotation difference between the monitor and the
 * cursor plane and scaling from capture size to output size.
 * framebuffer_size is the (rotation-adjusted) size of the monitor plane that
 * the raw cursor coordinates are relative to. */
static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, gsr_capture_metadata *capture_metadata, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, vec2i output_size, vec2i framebuffer_size) {
    // Scale factor from captured monitor size to output video size (0 guards against division by zero)
    const vec2d scale = {
        self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
        self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
    };

    // On nvidia the cursor plane is sampled through an external (OES) texture
    const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
    const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};

    // Net rotation to apply: how much more the monitor is rotated than the cursor plane itself
    const gsr_monitor_rotation cursor_plane_rotation = kms_rotation_to_gsr_monitor_rotation(cursor_drm_fd->rotation);
    const gsr_monitor_rotation rotation = sub_rotations(self->monitor_rotation, cursor_plane_rotation);

    // Map the cursor position from plane coordinates into the rotated monitor's coordinates
    vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
    switch(rotation) {
        case GSR_MONITOR_ROT_0:
            break;
        case GSR_MONITOR_ROT_90:
            cursor_pos = swap_vec2i(cursor_pos);
            cursor_pos.x = framebuffer_size.x - cursor_pos.x;
            // TODO: Remove this horrible hack
            cursor_pos.x -= cursor_size.x;
            break;
        case GSR_MONITOR_ROT_180:
            cursor_pos.x = framebuffer_size.x - cursor_pos.x;
            cursor_pos.y = framebuffer_size.y - cursor_pos.y;
            // TODO: Remove this horrible hack
            cursor_pos.x -= cursor_size.x;
            cursor_pos.y -= cursor_size.y;
            break;
        case GSR_MONITOR_ROT_270:
            cursor_pos = swap_vec2i(cursor_pos);
            cursor_pos.y = framebuffer_size.y - cursor_pos.y;
            // TODO: Remove this horrible hack
            cursor_pos.y -= cursor_size.y;
            break;
    }

    // Offset by the capture region, scale to output size and place inside the output rectangle
    cursor_pos.x -= self->params.region_position.x;
    cursor_pos.y -= self->params.region_position.y;
    cursor_pos.x *= scale.x;
    cursor_pos.y *= scale.y;
    cursor_pos.x += target_pos.x;
    cursor_pos.y += target_pos.y;

    // Flatten the cursor plane's dma-bufs into the parallel arrays eglCreateImage needs
    int fds[GSR_KMS_MAX_DMA_BUFS];
    uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
    uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
    uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
    for(int i = 0; i < cursor_drm_fd->num_dma_bufs; ++i) {
        fds[i] = cursor_drm_fd->dma_buf[i].fd;
        offsets[i] = cursor_drm_fd->dma_buf[i].offset;
        pitches[i] = cursor_drm_fd->dma_buf[i].pitch;
        modifiers[i] = cursor_drm_fd->modifier;
    }

    // Import the cursor plane and bind it to the cursor texture; the EGLImage is
    // destroyed right after binding (the texture keeps referencing the data)
    intptr_t img_attr_cursor[44];
    setup_dma_buf_attrs(img_attr_cursor, cursor_drm_fd->pixel_format, cursor_drm_fd->width, cursor_drm_fd->height,
        fds, offsets, pitches, modifiers, cursor_drm_fd->num_dma_bufs, true);
    EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
    const int target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
    self->params.egl->glBindTexture(target, self->cursor_texture_id);
    self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
    self->params.egl->glBindTexture(target, 0);
    if(cursor_image)
        self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);

    // Clip cursor drawing to this capture's output rectangle
    self->params.egl->glEnable(GL_SCISSOR_TEST);
    self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
    gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
        cursor_pos, (vec2i){cursor_size.x * scale.x, cursor_size.y * scale.y},
        (vec2i){0, 0}, cursor_size, cursor_size,
        gsr_monitor_rotation_to_rotation(rotation), capture_metadata->flip, GSR_SOURCE_COLOR_RGB, cursor_texture_id_is_external);
    self->params.egl->glDisable(GL_SCISSOR_TEST);
}
/* Draws the x11 cursor texture on top of the captured frame, scaled to the
 * output size. capture_pos is the monitor's offset (plus region offset) inside
 * the x11 root window. No-op when the cursor is not visible. */
static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, gsr_capture_metadata *capture_metadata, vec2i capture_pos, vec2i target_pos, vec2i output_size) {
    if(!self->params.x11_cursor->visible)
        return;

    // Scale factor from captured monitor size to output video size (0 guards against division by zero)
    const vec2d scale = {
        self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
        self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
    };

    // Translate to monitor-relative coordinates, subtract the hotspot so the
    // click point lines up, then scale and place inside the output rectangle
    const vec2i cursor_pos = {
        target_pos.x + (self->params.x11_cursor->position.x - self->params.x11_cursor->hotspot.x - capture_pos.x) * scale.x,
        target_pos.y + (self->params.x11_cursor->position.y - self->params.x11_cursor->hotspot.y - capture_pos.y) * scale.y
    };

    // Clip cursor drawing to this capture's output rectangle
    self->params.egl->glEnable(GL_SCISSOR_TEST);
    self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
    gsr_color_conversion_draw(color_conversion, self->params.x11_cursor->texture_id,
        cursor_pos, (vec2i){self->params.x11_cursor->size.x * scale.x, self->params.x11_cursor->size.y * scale.y},
        (vec2i){0, 0}, self->params.x11_cursor->size, self->params.x11_cursor->size,
        GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);
    self->params.egl->glDisable(GL_SCISSOR_TEST);
}
/* Schedules a clear of the color conversion target when the capture position
 * or the source plane size changed, so stale pixels from the previous placement
 * don't remain visible. */
static void gsr_capture_kms_update_capture_size_change(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i target_pos, const gsr_kms_response_item *drm_fd) {
    const bool position_changed = target_pos.x != self->prev_target_pos.x || target_pos.y != self->prev_target_pos.y;
    const bool plane_size_changed = drm_fd->src_w != self->prev_plane_size.x || drm_fd->src_h != self->prev_plane_size.y;
    if(position_changed || plane_size_changed) {
        self->prev_target_pos = target_pos;
        // Fix: store the same values the comparison above reads. The previous code
        // stored self->capture_size, which differs from the plane size when a region
        // is captured (or the monitor is rotated), so the comparison never stabilized
        // and a clear was scheduled every frame.
        self->prev_plane_size = (vec2i){ drm_fd->src_w, drm_fd->src_h };
        color_conversion->schedule_clear = true;
    }
}
/* Periodically re-resolves the target monitor's connector id and geometry so
 * capture keeps working when the monitor is reconfigured while recording.
 * Rate-limited to once per FIND_CRTC_BY_NAME_TIMEOUT_SECONDS; no-op on x11. */
static void gsr_capture_kms_update_connector_ids(gsr_capture_kms *self) {
    const double now = clock_get_monotonic_seconds();
    if(now - self->last_time_monitor_check < FIND_CRTC_BY_NAME_TIMEOUT_SECONDS)
        return;

    self->last_time_monitor_check = now;
    /* TODO: Assume for now that there is only 1 framebuffer for all monitors and it doesn't change */
    if(self->is_x11)
        return;

    self->monitor_id.num_connector_ids = 0;
    // is_x11 is known false here, so this always resolves to GSR_CONNECTION_DRM
    const gsr_connection_type connection_type = self->is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
    // MonitorCallbackUserdata monitor_callback_userdata = {
    //     &self->monitor_id,
    //     self->params.display_to_capture, strlen(self->params.display_to_capture),
    //     0,
    // };
    // for_each_active_monitor_output(self->params.egl->window, self->params.egl->card_path, connection_type, monitor_callback, &monitor_callback_userdata);

    gsr_monitor monitor;
    if(!get_monitor_by_name(self->params.egl, connection_type, self->params.display_to_capture, &monitor)) {
        fprintf(stderr, "gsr error: gsr_capture_kms_update_connector_ids: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
        return;
    }

    self->monitor_id.num_connector_ids = 1;
    self->monitor_id.connector_ids[0] = monitor.connector_id;

    monitor.name = self->params.display_to_capture;
    vec2i monitor_position = {0, 0};
    // TODO: This is cached. We need it updated.
    drm_monitor_get_display_server_data(self->params.egl->window, &monitor, &self->monitor_rotation, &monitor_position);

    self->capture_pos = monitor.pos;
    /* Monitor size is already rotated on x11 when the monitor is rotated, no need to apply it ourselves */
    if(self->is_x11)
        self->capture_size = monitor.size;
    else
        self->capture_size = rotate_capture_size_if_rotated(self, monitor.size);
}
/* Runs before each captured frame: picks the drm plane for the monitor from the
 * latest kms response, updates hdr metadata, and computes the capture/output
 * sizes and target position used by gsr_capture_kms_capture.
 * Leaves self->drm_fd NULL when no plane was found for the monitor. */
static void gsr_capture_kms_pre_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_kms *self = cap->priv;
    if(self->params.kms_response->num_items == 0) {
        // Only warn once to avoid spamming stderr every frame
        static bool error_shown = false;
        if(!error_shown) {
            error_shown = true;
            fprintf(stderr, "gsr error: gsr_capture_kms_pre_capture: no drm found, capture will fail\n");
        }
        return;
    }

    gsr_capture_kms_update_connector_ids(self);

    self->capture_is_combined_plane = false;
    self->drm_fd = find_monitor_drm(self, &self->capture_is_combined_plane);
    if(!self->drm_fd)
        return;

    if(self->drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&self->drm_fd->hdr_metadata))
        gsr_kms_set_hdr_metadata(self, self->drm_fd);

    // Capture size comes from the plane (rotation-adjusted), unless a region size overrides it
    self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ self->drm_fd->src_w, self->drm_fd->src_h });
    if(self->params.region_size.x > 0 && self->params.region_size.y > 0)
        self->capture_size = self->params.region_size;

    self->output_size = scale_keep_aspect_ratio(self->capture_size, capture_metadata->recording_size);
    self->target_pos = gsr_capture_get_target_position(self->output_size, capture_metadata);
    gsr_capture_kms_update_capture_size_change(self, color_conversion, self->target_pos, self->drm_fd);
}
/* Captures one frame: imports the monitor plane as a texture, draws it into the
 * color conversion target, then draws the cursor on top (x11 cursor on x11,
 * drm cursor plane otherwise). Returns 0 on success, -1 when there is nothing
 * to capture. */
static int gsr_capture_kms_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_kms *self = cap->priv;
    if(self->params.kms_response->num_items == 0)
        return -1;

    // Fix: pre_capture leaves drm_fd NULL when no plane was found for the monitor
    // even though the response had items; without this guard the dereferences
    // below would crash.
    if(!self->drm_fd)
        return -1;

    // When capturing a dedicated plane, the plane's own position replaces the monitor position
    vec2i capture_pos = self->capture_pos;
    if(!self->capture_is_combined_plane)
        capture_pos = (vec2i){self->drm_fd->x, self->drm_fd->y};
    capture_pos.x += self->params.region_position.x;
    capture_pos.y += self->params.region_position.y;

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    // Import the plane and bind it to the input texture; the EGLImage is destroyed
    // right after binding (the texture keeps referencing the data)
    EGLImage image = gsr_capture_kms_create_egl_image_with_fallback(self, self->drm_fd);
    if(image) {
        gsr_capture_kms_bind_image_to_input_texture_with_fallback(self, image);
        self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
    }

    // No rotation compensation needed for the combined plane; otherwise compensate
    // for the difference between monitor rotation and the plane's own rotation
    const gsr_monitor_rotation plane_rotation = kms_rotation_to_gsr_monitor_rotation(self->drm_fd->rotation);
    const gsr_monitor_rotation rotation = self->capture_is_combined_plane ? GSR_MONITOR_ROT_0 : sub_rotations(self->monitor_rotation, plane_rotation);

    gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
        self->target_pos, self->output_size,
        capture_pos, self->capture_size, (vec2i){ self->drm_fd->width, self->drm_fd->height },
        gsr_monitor_rotation_to_rotation(rotation), capture_metadata->flip, GSR_SOURCE_COLOR_RGB, self->external_texture_fallback);

    if(self->params.record_cursor) {
        gsr_kms_response_item *cursor_drm_fd = find_cursor_drm_if_on_monitor(self, self->drm_fd->connector_id, self->capture_is_combined_plane);
        // The cursor is handled by x11 on x11 instead of using the cursor drm plane because on prime systems with a dedicated nvidia gpu
        // the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device.
        // TODO: This doesn't work properly with software cursor on x11 since it will draw the x11 cursor on top of the cursor already in the framebuffer.
        // Detect if software cursor is used on x11 somehow.
        if(self->is_x11) {
            vec2i cursor_monitor_offset = self->capture_pos;
            cursor_monitor_offset.x += self->params.region_position.x;
            cursor_monitor_offset.y += self->params.region_position.y;
            render_x11_cursor(self, color_conversion, capture_metadata, cursor_monitor_offset, self->target_pos, self->output_size);
        } else if(cursor_drm_fd) {
            const vec2i framebuffer_size = rotate_capture_size_if_rotated(self, (vec2i){ self->drm_fd->src_w, self->drm_fd->src_h });
            render_drm_cursor(self, color_conversion, capture_metadata, cursor_drm_fd, self->target_pos, self->output_size, framebuffer_size);
        }
    }

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    return 0;
}
/* kms capture has no asynchronous failure condition: never requests a stop
 * and never reports an error. */
static bool gsr_capture_kms_should_stop(gsr_capture *cap, bool *err) {
    (void)cap;
    if(err != NULL)
        *err = false;
    return false;
}
/* Always true: dma-buf imports may have to be sampled through GL_TEXTURE_EXTERNAL_OES. */
static bool gsr_capture_kms_uses_external_image(gsr_capture *cap) {
    (void)cap;
    return true;
}
/* Copies the captured hdr metadata into ffmpeg's mastering display and content
 * light level structs. Returns false when no hdr metadata has been captured yet. */
static bool gsr_capture_kms_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
    gsr_capture_kms *self = cap->priv;
    if(!self->hdr_metadata_set)
        return false;

    light_metadata->MaxCLL = self->hdr_metadata.hdmi_metadata_type1.max_cll;
    light_metadata->MaxFALL = self->hdr_metadata.hdmi_metadata_type1.max_fall;

    // Primaries and white point become rationals with denominator 50000
    for(int primary = 0; primary < 3; ++primary) {
        mastering_display_metadata->display_primaries[primary][0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[primary].x, 50000);
        mastering_display_metadata->display_primaries[primary][1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[primary].y, 50000);
    }
    mastering_display_metadata->white_point[0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
    mastering_display_metadata->white_point[1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);

    // Min luminance is a rational over 10000, max luminance an integer rational
    mastering_display_metadata->min_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
    mastering_display_metadata->max_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);

    mastering_display_metadata->has_primaries = true;
    mastering_display_metadata->has_luminance = true;
    return true;
}
// static bool gsr_capture_kms_is_damaged(gsr_capture *cap) {
// gsr_capture_kms *self = cap->priv;
// return self->damaged;
// }
// static void gsr_capture_kms_clear_damage(gsr_capture *cap) {
// gsr_capture_kms *self = cap->priv;
// self->damaged = false;
// }
/* Frees the capture and all resources it owns (GL textures and the copied
 * display name). */
static void gsr_capture_kms_destroy(gsr_capture *cap) {
    gsr_capture_kms *self = cap->priv;
    if(self) {
        gsr_capture_kms_stop(self);
        // Cast away const: the string was allocated with strdup in gsr_capture_kms_create
        free((void*)self->params.display_to_capture);
        self->params.display_to_capture = NULL;
        free(self);
        cap->priv = NULL;
    }
    free(cap);
}
/* Creates a kms capture instance. The instance takes a copy of
 * params->display_to_capture and is released through its destroy callback.
 * Returns NULL on invalid params or allocation failure. */
gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params) {
    if(!params) {
        fprintf(stderr, "gsr error: gsr_capture_kms_create params is NULL\n");
        return NULL;
    }

    // Fix: strdup(NULL) is undefined behavior — validate before copying
    if(!params->display_to_capture) {
        fprintf(stderr, "gsr error: gsr_capture_kms_create params.display_to_capture is NULL\n");
        return NULL;
    }

    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
    if(!cap)
        return NULL;

    gsr_capture_kms *cap_kms = calloc(1, sizeof(gsr_capture_kms));
    if(!cap_kms) {
        free(cap);
        return NULL;
    }

    const char *display_to_capture = strdup(params->display_to_capture);
    if(!display_to_capture) {
        free(cap);
        free(cap_kms);
        return NULL;
    }

    cap_kms->params = *params;
    cap_kms->params.display_to_capture = display_to_capture;

    *cap = (gsr_capture) {
        .start = gsr_capture_kms_start,
        //.tick = gsr_capture_kms_tick,
        .should_stop = gsr_capture_kms_should_stop,
        .pre_capture = gsr_capture_kms_pre_capture,
        .capture = gsr_capture_kms_capture,
        .uses_external_image = gsr_capture_kms_uses_external_image,
        .set_hdr_metadata = gsr_capture_kms_set_hdr_metadata,
        //.is_damaged = gsr_capture_kms_is_damaged,
        //.clear_damage = gsr_capture_kms_clear_damage,
        .destroy = gsr_capture_kms_destroy,
        .priv = cap_kms
    };

    return cap;
}

457
src/capture/nvfbc.c Normal file
View File

@ -0,0 +1,457 @@
#include "../../include/capture/nvfbc.h"
#include "../../external/NvFBC.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
#include "../../include/window/window.h"
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <X11/Xlib.h>
#include <X11/extensions/Xrandr.h>
/* Private state for nvfbc (NVIDIA Framebuffer Capture) based capture. */
typedef struct {
    gsr_capture_nvfbc_params params;
    void *library;                              // Handle returned by dlopen for libnvidia-fbc.so.1
    NVFBC_SESSION_HANDLE nv_fbc_handle;
    PNVFBCCREATEINSTANCE nv_fbc_create_instance; // NvFBCCreateInstance resolved via dlsym
    NVFBC_API_FUNCTION_LIST nv_fbc_function_list;
    bool fbc_handle_created;                    // True after nvFBCCreateHandle succeeded
    bool capture_session_created;               // True after nvFBCCreateCaptureSession succeeded
    NVFBC_TOGL_SETUP_PARAMS setup_params;       // Result of nvFBCToGLSetUp (capture-to-GL setup)
    bool supports_direct_cursor;
    uint32_t width, height;
    NVFBC_TRACKING_TYPE tracking_type;          // Track the whole screen or a single output
    uint32_t output_id;                         // XRandR output id when tracking a single output (0 = none)
    uint32_t tracking_width, tracking_height;   // Size of the tracked screen/output
    bool nvfbc_needs_recreate;
    double nvfbc_dead_start;                    // NOTE(review): presumably when nvfbc stopped responding — used by code outside this view, confirm there
} gsr_capture_nvfbc;
/* Returns the larger of two ints. */
static int max_int(int a, int b) {
    if(a > b)
        return a;
    return b;
}
/* Looks up the NvFBC RandR output whose name equals display_name.
 * On success stores the tracked box size in *width/*height and returns the
 * output id. Returns 0 on failure (no outputs, or name not found). */
static uint32_t get_output_id_from_display_name(NVFBC_RANDR_OUTPUT_INFO *outputs, uint32_t num_outputs, const char *display_name, uint32_t *width, uint32_t *height) {
    if(!outputs)
        return 0;

    for(uint32_t i = 0; i < num_outputs; ++i) {
        if(strcmp(outputs[i].name, display_name) != 0)
            continue;
        *width = outputs[i].trackedBox.w;
        *height = outputs[i].trackedBox.h;
        return outputs[i].dwId;
    }
    return 0;
}
/* Returns true when (major, minor) >= (expected_major, expected_minor). */
static bool version_at_least(int major, int minor, int expected_major, int expected_minor) {
    if(major != expected_major)
        return major > expected_major;
    return minor >= expected_minor;
}
/* Returns true when (major, minor) < (expected_major, expected_minor). */
static bool version_less_than(int major, int minor, int expected_major, int expected_minor) {
    if(major != expected_major)
        return major < expected_major;
    return minor < expected_minor;
}
/* Stores a void pointer through an out-parameter. Used below to funnel dlsym
 * results into function-pointer members — presumably to avoid a direct
 * object-to-function pointer cast at the call site (TODO confirm intent). */
static void set_func_ptr(void **dst, void *src) {
    *dst = src;
}
/* Loads libnvidia-fbc.so.1, resolves NvFBCCreateInstance and fills the NvFBC
 * function list. On success stores the dlopen handle in self->library and
 * returns true; on failure closes the library and returns false. */
static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
    gsr_capture_nvfbc *self = cap->priv;

    dlerror(); /* clear any previously recorded error */
    void *lib = dlopen("libnvidia-fbc.so.1", RTLD_LAZY);
    if(!lib) {
        fprintf(stderr, "gsr error: failed to load libnvidia-fbc.so.1, error: %s\n", dlerror());
        return false;
    }

    set_func_ptr((void**)&self->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
    if(!self->nv_fbc_create_instance) {
        fprintf(stderr, "gsr error: unable to resolve symbol 'NvFBCCreateInstance'\n");
        goto fail;
    }

    memset(&self->nv_fbc_function_list, 0, sizeof(self->nv_fbc_function_list));
    self->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
    const NVFBCSTATUS status = self->nv_fbc_create_instance(&self->nv_fbc_function_list);
    if(status != NVFBC_SUCCESS) {
        fprintf(stderr, "gsr error: failed to create NvFBC instance (status: %d)\n", status);
        goto fail;
    }

    self->library = lib;
    return true;

fail:
    dlclose(lib);
    return false;
}
/* Destroys the NvFBC capture session, if both the handle and the session exist. */
static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *self) {
    if(!self->fbc_handle_created || !self->capture_session_created)
        return;

    NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroy_capture_params;
    memset(&destroy_capture_params, 0, sizeof(destroy_capture_params));
    destroy_capture_params.dwVersion = NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER;
    self->nv_fbc_function_list.nvFBCDestroyCaptureSession(self->nv_fbc_handle, &destroy_capture_params);
    self->capture_session_created = false;
}
/* Destroys the NvFBC handle, if one was created. */
static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *self) {
    if(!self->fbc_handle_created)
        return;

    NVFBC_DESTROY_HANDLE_PARAMS destroy_params;
    memset(&destroy_params, 0, sizeof(destroy_params));
    destroy_params.dwVersion = NVFBC_DESTROY_HANDLE_PARAMS_VER;
    self->nv_fbc_function_list.nvFBCDestroyHandle(self->nv_fbc_handle, &destroy_params);
    self->fbc_handle_created = false;
    self->nv_fbc_handle = 0;
}
/* Tears down the capture session first (it requires a live handle), then the
 * NvFBC handle itself. */
static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *self) {
    gsr_capture_nvfbc_destroy_session(self);
    gsr_capture_nvfbc_destroy_handle(self);
}
/* Creates the NvFBC handle bound to our GLX context, checks that a capture
 * session can be created, and resolves what to track: the whole screen or a
 * single XRandR output (with its size). x11 only.
 * Returns 0 on success, -1 on failure (handle destroyed on failure). */
static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) {
    NVFBCSTATUS status;

    NVFBC_CREATE_HANDLE_PARAMS create_params;
    memset(&create_params, 0, sizeof(create_params));
    create_params.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
    create_params.bExternallyManagedContext = NVFBC_TRUE;
    create_params.glxCtx = self->params.egl->glx_context;
    create_params.glxFBConfig = self->params.egl->glx_fb_config;

    status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
    if(status != NVFBC_SUCCESS) {
        // Reverse engineering for interoperability: retry with the private key
        // that unlocks NvFBC on consumer gpus
        const uint8_t enable_key[] = { 0xac, 0x10, 0xc9, 0x2e, 0xa5, 0xe6, 0x87, 0x4f, 0x8f, 0x4b, 0xf4, 0x61, 0xf8, 0x56, 0x27, 0xe9 };
        create_params.privateData = enable_key;
        create_params.privateDataSize = 16;

        status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
        if(status != NVFBC_SUCCESS) {
            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
            goto error_cleanup;
        }
    }
    self->fbc_handle_created = true;

    NVFBC_GET_STATUS_PARAMS status_params;
    memset(&status_params, 0, sizeof(status_params));
    status_params.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;

    status = self->nv_fbc_function_list.nvFBCGetStatus(self->nv_fbc_handle, &status_params);
    if(status != NVFBC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
        goto error_cleanup;
    }

    if(status_params.bCanCreateNow == NVFBC_FALSE) {
        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: it's not possible to create a capture session on this system\n");
        goto error_cleanup;
    }

    // nvfbc capture is x11 only; default tracking size is the whole x11 screen
    assert(gsr_window_get_display_server(self->params.egl->window) == GSR_DISPLAY_SERVER_X11);
    Display *display = gsr_window_get_display(self->params.egl->window);
    self->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(display));
    self->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(display));
    self->tracking_type = strcmp(self->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
    if(self->tracking_type == NVFBC_TRACKING_OUTPUT) {
        if(!status_params.bXRandRAvailable) {
            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the xrandr extension is not available\n");
            goto error_cleanup;
        }

        if(status_params.bInModeset) {
            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the x server is in modeset, unable to record\n");
            goto error_cleanup;
        }

        // Tracking a single output: resolve its id and overwrite the tracking size with the output's size
        self->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, self->params.display_to_capture, &self->tracking_width, &self->tracking_height);
        if(self->output_id == 0) {
            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", self->params.display_to_capture);
            goto error_cleanup;
        }
    }

    self->width = self->tracking_width;
    self->height = self->tracking_height;

    return 0;

    error_cleanup:
    gsr_capture_nvfbc_destroy_session_and_handle(self);
    return -1;
}
/* Creates the NVFBC capture session and sets up OpenGL frame grabbing.
 * Must run after gsr_capture_nvfbc_setup_handle has filled in
 * self->tracking_type / self->output_id. Returns 0 on success, -1 on failure
 * (session destroyed on partial failure). */
static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *self) {
    NVFBC_CREATE_CAPTURE_SESSION_PARAMS create_capture_params;
    memset(&create_capture_params, 0, sizeof(create_capture_params));
    create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
    create_capture_params.eCaptureType = NVFBC_CAPTURE_TO_GL;
    /* Cursor capture with direct capture is only enabled when the driver supports it
       (supports_direct_cursor is currently always false — see gsr_capture_nvfbc_start). */
    create_capture_params.bWithCursor = (!self->params.direct_capture || self->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
    if(!self->params.record_cursor)
        create_capture_params.bWithCursor = false;
    create_capture_params.eTrackingType = self->tracking_type;
    /* Sampling interval in milliseconds, rounded up so sampling never exceeds the requested fps. */
    create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)self->params.fps);
    create_capture_params.bAllowDirectCapture = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
    create_capture_params.bPushModel = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
    /* Modeset recovery is handled manually in gsr_capture_nvfbc_capture by recreating the session. */
    create_capture_params.bDisableAutoModesetRecovery = true;
    if(self->tracking_type == NVFBC_TRACKING_OUTPUT)
        create_capture_params.dwOutputId = self->output_id;

    NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCCreateCaptureSession(self->nv_fbc_handle, &create_capture_params);
    if(status != NVFBC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
        return -1;
    }
    self->capture_session_created = true;

    /* Ask NVFBC to deliver frames as GL textures in BGRA (consumed by gsr_capture_nvfbc_capture). */
    memset(&self->setup_params, 0, sizeof(self->setup_params));
    self->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
    self->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;

    status = self->nv_fbc_function_list.nvFBCToGLSetUp(self->nv_fbc_handle, &self->setup_params);
    if(status != NVFBC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
        gsr_capture_nvfbc_destroy_session(self);
        return -1;
    }

    return 0;
}
/* Tears down the nvfbc session/handle and releases the dynamically loaded
   library plus the owned copy of the display name. Safe to call more than once. */
static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *self) {
    gsr_capture_nvfbc_destroy_session_and_handle(self);

    void *library = self->library;
    self->library = NULL;
    if(library)
        dlclose(library);

    /* free(NULL) is a no-op, so no guard is needed. */
    free((void*)self->params.display_to_capture);
    self->params.display_to_capture = NULL;
}
/* gsr_capture start callback: loads the nvfbc library, applies known driver-bug
 * workarounds, creates the nvfbc handle + session and reports the output video
 * size in capture_metadata. Returns 0 on success, -1 on failure (fully cleaned up). */
static int gsr_capture_nvfbc_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_nvfbc *self = cap->priv;
    if(!gsr_capture_nvfbc_load_library(cap))
        return -1;

    self->supports_direct_cursor = false;
    int driver_major_version = 0;
    int driver_minor_version = 0;
    if(self->params.direct_capture && get_nvidia_driver_version(&driver_major_version, &driver_minor_version)) {
        fprintf(stderr, "gsr info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);

        // TODO: Re-evaluate this workaround when the affected driver range is no longer relevant.
        /* Direct capture stutters on drivers in [515.57, 520.56); fall back to regular screen capture there. */
        if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
            self->params.direct_capture = false;
            fprintf(stderr, "gsr warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
        }

        // TODO:
        // Cursor capture disabled because moving the cursor doesn't update capture rate to monitor hz and instead captures at 10-30 hz
        /*
        if(direct_capture) {
            if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
                self->supports_direct_cursor = true;
            else
                fprintf(stderr, "gsr info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
        }
        */
    }

    if(gsr_capture_nvfbc_setup_handle(self) != 0) {
        goto error_cleanup;
    }

    if(gsr_capture_nvfbc_setup_session(self) != 0) {
        goto error_cleanup;
    }

    capture_metadata->video_size.x = self->tracking_width;
    capture_metadata->video_size.y = self->tracking_height;

    /* Scale the output to the requested resolution (keeping aspect ratio), otherwise
       use the region size if a region crop was requested. */
    if(self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0) {
        self->params.output_resolution = scale_keep_aspect_ratio(capture_metadata->video_size, self->params.output_resolution);
        capture_metadata->video_size = self->params.output_resolution;
    } else if(self->params.region_size.x > 0 && self->params.region_size.y > 0) {
        capture_metadata->video_size = self->params.region_size;
    }

    return 0;

    error_cleanup:
    gsr_capture_nvfbc_stop(self);
    return -1;
}
/* Returns true if the capture target monitor (or any monitor, for "screen")
 * is currently connected according to XRandR. Used to predict whether an nvfbc
 * session recreate would succeed (see gsr_capture_nvfbc_capture). */
static bool gsr_capture_nvfbc_is_capture_monitor_connected(gsr_capture_nvfbc *self) {
    Display *dpy = gsr_window_get_display(self->params.egl->window);

    int num_monitors = 0;
    XRRMonitorInfo *monitors = XRRGetMonitors(dpy, DefaultRootWindow(dpy), True, &num_monitors);
    if(!monitors)
        return false;

    bool capture_monitor_connected = false;
    if(strcmp(self->params.display_to_capture, "screen") == 0) {
        /* "screen" captures everything, so any connected monitor counts. */
        capture_monitor_connected = num_monitors > 0;
    } else {
        /* Compare the capture target name against each XRandR monitor name. */
        for(int i = 0; i < num_monitors; ++i) {
            char *monitor_name = XGetAtomName(dpy, monitors[i].name);
            if(!monitor_name)
                continue;

            if(strcmp(monitor_name, self->params.display_to_capture) == 0) {
                capture_monitor_connected = true;
                XFree(monitor_name);
                break;
            }
            XFree(monitor_name);
        }
    }

    XRRFreeMonitors(monitors);
    return capture_monitor_connected;
}
/* gsr_capture capture callback: grabs one frame with nvfbc and draws it into
 * the color conversion target. If a previous grab failed (typically a modeset),
 * the session is recreated here, retried at most once per second.
 * Returns 0 on success or when a frame is intentionally skipped, -1 if the
 * session recreate failed. */
static int gsr_capture_nvfbc_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_nvfbc *self = cap->priv;

    const double nvfbc_recreate_retry_time_seconds = 1.0;
    if(self->nvfbc_needs_recreate) {
        const double now = clock_get_monotonic_seconds();
        if(now - self->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
            self->nvfbc_dead_start = now;

            /*
                Do not attempt to recreate the nvfbc session if the monitor isn't turned on/connected.
                This is to predict if the nvfbc session create below will fail since if it fails it leaks an x11 display (a bug in the nvidia driver).
            */
            if(!gsr_capture_nvfbc_is_capture_monitor_connected(self))
                return 0;

            gsr_capture_nvfbc_destroy_session_and_handle(self);

            if(gsr_capture_nvfbc_setup_handle(self) != 0) {
                fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc handle, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
                return -1;
            }

            if(gsr_capture_nvfbc_setup_session(self) != 0) {
                fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc session, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
                return -1;
            }

            fprintf(stderr, "gsr info: gsr_capture_nvfbc_capture: recreated nvfbc session after modeset recovery\n");
            self->nvfbc_needs_recreate = false;
        } else {
            /* Still inside the retry backoff window: skip this frame. */
            return 0;
        }
    }

    /* Source rectangle: the full tracked area, or the requested region crop. */
    vec2i frame_size = (vec2i){self->width, self->height};
    const vec2i original_frame_size = frame_size;
    if(self->params.region_size.x > 0 && self->params.region_size.y > 0)
        frame_size = self->params.region_size;

    const vec2i output_size = scale_keep_aspect_ratio(frame_size, capture_metadata->recording_size);
    const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);

    NVFBC_FRAME_GRAB_INFO frame_info;
    memset(&frame_info, 0, sizeof(frame_info));

    NVFBC_TOGL_GRAB_FRAME_PARAMS grab_params;
    memset(&grab_params, 0, sizeof(grab_params));
    grab_params.dwVersion = NVFBC_TOGL_GRAB_FRAME_PARAMS_VER;
    grab_params.dwFlags = NVFBC_TOGL_GRAB_FLAGS_NOWAIT | NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH; // TODO: Remove NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH
    grab_params.pFrameGrabInfo = &frame_info;
    grab_params.dwTimeoutMs = 0;

    NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCToGLGrabFrame(self->nv_fbc_handle, &grab_params);
    if(status != NVFBC_SUCCESS) {
        /* Treat a grab failure as a dead session (commonly caused by a modeset):
           schedule a recreate instead of failing the whole recording. */
        fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
        self->nvfbc_needs_recreate = true;
        self->nvfbc_dead_start = clock_get_monotonic_seconds();
        return 0;
    }

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    /* The grabbed frame is a GL texture (BGRA, see setup_session); draw it scaled into the target. */
    gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
        target_pos, (vec2i){output_size.x, output_size.y},
        self->params.region_position, frame_size, original_frame_size,
        GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_BGR, false);

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    return 0;
}
/* Stops capture and frees both the private nvfbc state and the capture object. */
static void gsr_capture_nvfbc_destroy(gsr_capture *cap) {
    gsr_capture_nvfbc *nvfbc = cap->priv;
    gsr_capture_nvfbc_stop(nvfbc);
    free(nvfbc);
    free(cap);
}
gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
if(!params) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_create params is NULL\n");
return NULL;
}
if(!params->display_to_capture) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_create params.display_to_capture is NULL\n");
return NULL;
}
gsr_capture *cap = calloc(1, sizeof(gsr_capture));
if(!cap)
return NULL;
gsr_capture_nvfbc *cap_nvfbc = calloc(1, sizeof(gsr_capture_nvfbc));
if(!cap_nvfbc) {
free(cap);
return NULL;
}
const char *display_to_capture = strdup(params->display_to_capture);
if(!display_to_capture) {
free(cap);
free(cap_nvfbc);
return NULL;
}
cap_nvfbc->params = *params;
cap_nvfbc->params.display_to_capture = display_to_capture;
cap_nvfbc->params.fps = max_int(cap_nvfbc->params.fps, 1);
*cap = (gsr_capture) {
.start = gsr_capture_nvfbc_start,
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_nvfbc_capture,
.uses_external_image = NULL,
.destroy = gsr_capture_nvfbc_destroy,
.priv = cap_nvfbc
};
return cap;
}

482
src/capture/portal.c Normal file
View File

@ -0,0 +1,482 @@
#include "../../include/capture/portal.h"
#include "../../include/color_conversion.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
#include "../../include/dbus.h"
#include "../../include/pipewire_video.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <limits.h>
#include <assert.h>
#define PORTAL_CAPTURE_CANCELED_BY_USER_EXIT_CODE 60
/* Setup progression states for the portal capture.
   NOTE(review): not referenced anywhere in the visible code of this file — confirm before removing. */
typedef enum {
    PORTAL_CAPTURE_SETUP_IDLE,
    PORTAL_CAPTURE_SETUP_IN_PROGRESS,
    PORTAL_CAPTURE_SETUP_FINISHED,
    PORTAL_CAPTURE_SETUP_FAILED
} gsr_portal_capture_setup_state;

/* Private state for the desktop portal (dbus ScreenCast + pipewire) capture backend. */
typedef struct {
    gsr_capture_portal_params params;     /* copy of the creation parameters */
    gsr_texture_map texture_map;          /* GL textures that pipewire frames are mapped into */
    gsr_dbus dbus;                        /* ScreenCast portal dbus session */
    char *session_handle;                 /* set by gsr_dbus_screencast_create_session */
    gsr_pipewire_video pipewire;          /* pipewire video stream state */
    vec2i capture_size;                   /* negotiated frame (region) size in pixels */
    gsr_map_texture_output pipewire_data; /* latest mapped frame info; its dmabuf fds are closed after each capture */
    bool should_stop;                     /* set when capture hit a fatal condition or a user cancel */
    bool stop_is_error;                   /* whether should_stop represents an error (vs. user cancel) */
    bool do_capture;                      /* set by pre_capture when a frame is ready to be drawn */
} gsr_capture_portal;
/* Closes the dmabuf fds of the most recently mapped pipewire frame and
   marks the frame data as consumed. */
static void gsr_capture_portal_cleanup_plane_fds(gsr_capture_portal *self) {
    const int num_planes = self->pipewire_data.num_dmabuf_data;
    for(int plane = 0; plane < num_planes; ++plane) {
        const int fd = self->pipewire_data.dmabuf_data[plane].fd;
        if(fd > 0) {
            close(fd);
            self->pipewire_data.dmabuf_data[plane].fd = 0;
        }
    }
    self->pipewire_data.num_dmabuf_data = 0;
}
/* Releases all portal capture resources: GL textures, any outstanding dmabuf
 * fds, the pipewire stream and the dbus session. Safe on a partially
 * initialized capture. */
static void gsr_capture_portal_stop(gsr_capture_portal *self) {
    if(self->texture_map.texture_id) {
        self->params.egl->glDeleteTextures(1, &self->texture_map.texture_id);
        self->texture_map.texture_id = 0;
    }

    if(self->texture_map.external_texture_id) {
        self->params.egl->glDeleteTextures(1, &self->texture_map.external_texture_id);
        self->texture_map.external_texture_id = 0;
    }

    if(self->texture_map.cursor_texture_id) {
        self->params.egl->glDeleteTextures(1, &self->texture_map.cursor_texture_id);
        self->texture_map.cursor_texture_id = 0;
    }

    gsr_capture_portal_cleanup_plane_fds(self);

    /* pipewire is torn down before dbus: the pipewire stream was obtained
       through the dbus ScreenCast session (see gsr_capture_portal_setup_dbus). */
    gsr_pipewire_video_deinit(&self->pipewire);
    gsr_dbus_deinit(&self->dbus);
}
/* Generates one texture for |target| with linear min/mag filtering and leaves
 * the target unbound. The new texture id is stored in *texture_id. */
static void gsr_capture_portal_create_linear_texture(gsr_capture_portal *self, unsigned int target, unsigned int *texture_id) {
    self->params.egl->glGenTextures(1, texture_id);
    self->params.egl->glBindTexture(target, *texture_id);
    self->params.egl->glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->params.egl->glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->params.egl->glBindTexture(target, 0);
}

/* Creates the three input textures pipewire frames are mapped into:
 * a regular 2D texture, an EGL external-image texture (for formats imported
 * as external images) and a 2D texture for the cursor metadata. */
static void gsr_capture_portal_create_input_textures(gsr_capture_portal *self) {
    gsr_capture_portal_create_linear_texture(self, GL_TEXTURE_2D, &self->texture_map.texture_id);
    gsr_capture_portal_create_linear_texture(self, GL_TEXTURE_EXTERNAL_OES, &self->texture_map.external_texture_id);
    gsr_capture_portal_create_linear_texture(self, GL_TEXTURE_2D, &self->texture_map.cursor_texture_id);
}
/* Writes the default restore-token cache path into buffer:
 * $XDG_CONFIG_HOME/gpu-screen-recorder/restore_token when XDG_CONFIG_HOME is set,
 * otherwise $HOME/.config/gpu-screen-recorder/restore_token ("/tmp" if HOME is unset). */
static void get_default_gpu_screen_recorder_restore_token_path(char *buffer, size_t buffer_size) {
    const char *xdg_config_home = getenv("XDG_CONFIG_HOME");
    if(xdg_config_home) {
        snprintf(buffer, buffer_size, "%s/gpu-screen-recorder/restore_token", xdg_config_home);
        return;
    }

    const char *home = getenv("HOME");
    if(!home)
        home = "/tmp";
    snprintf(buffer, buffer_size, "%s/.config/gpu-screen-recorder/restore_token", home);
}
/* Ensures the parent directory chain of filepath exists.
 * Returns true on success or when filepath has no directory component. */
static bool create_directory_to_file(const char *filepath) {
    const char *last_slash = strrchr(filepath, '/');
    if(!last_slash) /* Assuming it's the current directory (for example if filepath is "restore_token"), which doesn't need to be created */
        return true;

    char dir[PATH_MAX];
    snprintf(dir, sizeof(dir), "%.*s", (int)(last_slash - filepath), filepath);

    if(create_directory_recursive(dir) == 0)
        return true;

    fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create directory (%s) for restore token\n", dir);
    return false;
}
/* Persists the desktop portal restore token so future sessions can skip the
 * permission dialog. Writes to portal_session_token_filepath when given,
 * otherwise to the default cache path. Failures only print warnings. */
static void gsr_capture_portal_save_restore_token(const char *restore_token, const char *portal_session_token_filepath) {
    char restore_token_path[PATH_MAX];
    restore_token_path[0] = '\0';
    if(portal_session_token_filepath)
        snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
    else
        get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));

    if(!create_directory_to_file(restore_token_path))
        return;

    FILE *f = fopen(restore_token_path, "wb");
    if(!f) {
        fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create restore token file (%s)\n", restore_token_path);
        return;
    }

    const int restore_token_len = strlen(restore_token);
    if((long)fwrite(restore_token, 1, restore_token_len, f) != restore_token_len) {
        fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to write restore token to file (%s)\n", restore_token_path);
        fclose(f);
        return;
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_save_restore_token: saved restore token to cache (%s)\n", restore_token);
    fclose(f);
}
/* Loads a previously saved desktop portal restore token into buffer.
 * Reads from portal_session_token_filepath when given, otherwise from the
 * default cache path. On any failure buffer is left as an empty string.
 *
 * Fix over the original: when ftell failed or the token file was empty,
 * >= 1024 bytes or too large for buffer, the old code skipped the read but
 * still printed the "read cached restore token" message with an empty token;
 * the size is now validated explicitly before reading. */
static void gsr_capture_portal_get_restore_token_from_cache(char *buffer, size_t buffer_size, const char *portal_session_token_filepath) {
    assert(buffer_size > 0);
    buffer[0] = '\0';

    char restore_token_path[PATH_MAX];
    restore_token_path[0] = '\0';
    if(portal_session_token_filepath)
        snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
    else
        get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));

    FILE *f = fopen(restore_token_path, "rb");
    if(!f) {
        fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: no restore token found in cache or failed to load (%s)\n", restore_token_path);
        return;
    }

    fseek(f, 0, SEEK_END);
    const long file_size = ftell(f);
    fseek(f, 0, SEEK_SET);

    /* Reject empty, suspiciously large or non-fitting token files (also covers ftell failure). */
    if(file_size <= 0 || file_size >= 1024 || file_size >= (long)buffer_size) {
        fprintf(stderr, "gsr warning: gsr_capture_portal_get_restore_token_from_cache: restore token file has an invalid size (%s)\n", restore_token_path);
        fclose(f);
        return;
    }

    if((long)fread(buffer, 1, file_size, f) != file_size) {
        buffer[0] = '\0';
        fprintf(stderr, "gsr warning: gsr_capture_portal_get_restore_token_from_cache: failed to read restore token (%s)\n", restore_token_path);
        fclose(f);
        return;
    }

    buffer[file_size] = '\0';
    fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: read cached restore token (%s)\n", buffer);
    fclose(f);
}
/* Runs the org.freedesktop.portal.ScreenCast handshake:
 * CreateSession -> SelectSources -> Start -> OpenPipeWireRemote.
 * On success *pipewire_fd and *pipewire_node identify the stream to consume.
 * Returns 0 on success, a portal response status on request failure
 * (1 = user cancel, 2 = other portal error — see gsr_capture_portal_setup),
 * or -1 on dbus init / OpenPipeWireRemote failure. */
static int gsr_capture_portal_setup_dbus(gsr_capture_portal *self, int *pipewire_fd, uint32_t *pipewire_node) {
    *pipewire_fd = 0;
    *pipewire_node = 0;

    int response_status = 0;

    /* An existing restore token lets the portal skip the permission dialog. */
    char restore_token[1024];
    restore_token[0] = '\0';
    if(self->params.restore_portal_session)
        gsr_capture_portal_get_restore_token_from_cache(restore_token, sizeof(restore_token), self->params.portal_session_token_filepath);

    if(!gsr_dbus_init(&self->dbus, restore_token))
        return -1;

    fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: CreateSession\n");
    response_status = gsr_dbus_screencast_create_session(&self->dbus, &self->session_handle);
    if(response_status != 0) {
        fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: CreateSession failed\n");
        return response_status;
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: SelectSources\n");
    response_status = gsr_dbus_screencast_select_sources(&self->dbus, self->session_handle, GSR_PORTAL_CAPTURE_TYPE_ALL, self->params.record_cursor ? GSR_PORTAL_CURSOR_MODE_EMBEDDED : GSR_PORTAL_CURSOR_MODE_HIDDEN);
    if(response_status != 0) {
        fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: SelectSources failed\n");
        return response_status;
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: Start\n");
    response_status = gsr_dbus_screencast_start(&self->dbus, self->session_handle, pipewire_node);
    if(response_status != 0) {
        fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: Start failed\n");
        return response_status;
    }

    /* Cache the (possibly new) restore token for the next session. */
    const char *screencast_restore_token = gsr_dbus_screencast_get_restore_token(&self->dbus);
    if(screencast_restore_token)
        gsr_capture_portal_save_restore_token(screencast_restore_token, self->params.portal_session_token_filepath);

    fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: OpenPipeWireRemote\n");
    if(!gsr_dbus_screencast_open_pipewire_remote(&self->dbus, self->session_handle, pipewire_fd)) {
        fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: OpenPipeWireRemote failed\n");
        return -1;
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: desktop portal setup finished\n");
    return 0;
}
/* Waits up to 5 seconds for pipewire format negotiation to finish, polling
 * every 30ms, and stores the negotiated frame size in self->capture_size.
 * Returns false on timeout. */
static bool gsr_capture_portal_get_frame_dimensions(gsr_capture_portal *self) {
    fprintf(stderr, "gsr info: gsr_capture_portal_start: waiting for pipewire negotiation\n");

    const double start_time = clock_get_monotonic_seconds();
    while(clock_get_monotonic_seconds() - start_time < 5.0) {
        /* Negotiation is done once a buffer can be mapped into our textures. */
        if(gsr_pipewire_video_map_texture(&self->pipewire, self->texture_map, &self->pipewire_data)) {
            self->capture_size.x = self->pipewire_data.region.width;
            self->capture_size.y = self->pipewire_data.region.height;
            fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire negotiation finished\n");
            return true;
        }
        usleep(30 * 1000); /* 30 milliseconds */
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_start: timed out waiting for pipewire negotiation (5 seconds)\n");
    return false;
}
/* Full portal capture setup: creates the GL input textures, runs the dbus
 * ScreenCast handshake, initializes pipewire and waits for the first
 * negotiated frame. Returns 0 on success, 50 for a portal failure,
 * PORTAL_CAPTURE_CANCELED_BY_USER_EXIT_CODE for a user cancel, or -1. */
static int gsr_capture_portal_setup(gsr_capture_portal *self, int fps) {
    gsr_capture_portal_create_input_textures(self);

    int pipewire_fd = 0;
    uint32_t pipewire_node = 0;
    const int response_status = gsr_capture_portal_setup_dbus(self, &pipewire_fd, &pipewire_node);
    if(response_status != 0) {
        // Response status values:
        // 0: Success, the request is carried out
        // 1: The user cancelled the interaction
        // 2: The user interaction was ended in some other way
        // Response status value 2 happens usually if there was some kind of error in the desktop portal on the system
        if(response_status == 2) {
            fprintf(stderr, "gsr error: gsr_capture_portal_setup: desktop portal capture failed. Either you Wayland compositor doesn't support desktop portal capture or it's incorrectly setup on your system\n");
            return 50;
        } else if(response_status == 1) {
            fprintf(stderr, "gsr error: gsr_capture_portal_setup: desktop portal capture failed. It seems like desktop portal capture was canceled by the user.\n");
            return PORTAL_CAPTURE_CANCELED_BY_USER_EXIT_CODE;
        } else {
            return -1;
        }
    }

    fprintf(stderr, "gsr info: gsr_capture_portal_setup: setting up pipewire\n");
    /* TODO: support hdr when pipewire supports it */
    /* gsr_pipewire closes the pipewire fd, even on failure */
    if(!gsr_pipewire_video_init(&self->pipewire, pipewire_fd, pipewire_node, fps, self->params.record_cursor, self->params.egl)) {
        fprintf(stderr, "gsr error: gsr_capture_portal_setup: failed to setup pipewire with fd: %d, node: %" PRIu32 "\n", pipewire_fd, pipewire_node);
        return -1;
    }
    fprintf(stderr, "gsr info: gsr_capture_portal_setup: pipewire setup finished\n");

    if(!gsr_capture_portal_get_frame_dimensions(self))
        return -1;

    return 0;
}
/* gsr_capture start callback: performs the full portal + pipewire setup and
 * reports the final video size, scaled to the requested output resolution if
 * one was given. Returns 0 on success or the setup's non-zero status code. */
static int gsr_capture_portal_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_portal *self = cap->priv;

    const int result = gsr_capture_portal_setup(self, capture_metadata->fps);
    if(result != 0) {
        gsr_capture_portal_stop(self);
        return result;
    }

    if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
        capture_metadata->video_size = self->capture_size;
    } else {
        /* Keep aspect ratio when fitting the capture into the requested resolution. */
        self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
        capture_metadata->video_size = self->params.output_resolution;
    }

    return 0;
}
/* Returns the larger of a and b. */
static int max_int(int a, int b) {
    if(a > b)
        return a;
    return b;
}
/* The capture has pending synchronous work when pipewire has requested a restart. */
static bool gsr_capture_portal_capture_has_synchronous_task(gsr_capture *cap) {
    gsr_capture_portal *portal = cap->priv;
    return gsr_pipewire_video_should_restart(&portal->pipewire);
}
/* Returns true if any byte of the fourcc code is the character 'A'
   (e.g. ARGB/ABGR-style formats), i.e. the format carries an alpha channel. */
static bool fourcc_has_alpha(uint32_t fourcc) {
    const uint8_t *bytes = (const uint8_t*)&fourcc;
    return bytes[0] == 'A' || bytes[1] == 'A' || bytes[2] == 'A' || bytes[3] == 'A';
}
/* gsr_capture pre_capture callback: decides whether this tick should draw a
 * frame (self->do_capture). Restarts the whole portal session when pipewire
 * requests it and maps the newest pipewire buffer into the GL textures. */
static void gsr_capture_portal_pre_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_portal *self = cap->priv;
    self->do_capture = false;

    if(self->should_stop)
        return;

    if(gsr_pipewire_video_should_restart(&self->pipewire)) {
        fprintf(stderr, "gsr info: gsr_capture_portal_pre_capture: pipewire capture was paused, trying to start capture again\n");
        gsr_capture_portal_stop(self);
        const int result = gsr_capture_portal_setup(self, capture_metadata->fps);
        if(result != 0) {
            /* A user cancel is a clean stop; everything else counts as an error. */
            self->stop_is_error = result != PORTAL_CAPTURE_CANCELED_BY_USER_EXIT_CODE;
            self->should_stop = true;
        }
        return;
    }

    /* TODO: Handle formats other than RGB(A) */
    /* Only map a new buffer once the previous frame's fds have been consumed
       (gsr_capture_portal_capture closes them after drawing). */
    if(self->pipewire_data.num_dmabuf_data == 0) {
        if(gsr_pipewire_video_map_texture(&self->pipewire, self->texture_map, &self->pipewire_data)) {
            /* Source size changed (e.g. window resize): clear stale target contents. */
            if(self->pipewire_data.region.width != self->capture_size.x || self->pipewire_data.region.height != self->capture_size.y) {
                self->capture_size.x = self->pipewire_data.region.width;
                self->capture_size.y = self->pipewire_data.region.height;
                color_conversion->schedule_clear = true;
            }
        } else {
            return;
        }
    }

    /* Alpha-capable formats force a clear of the conversion target. */
    const bool fourcc_alpha = fourcc_has_alpha(self->pipewire_data.fourcc);
    if(fourcc_alpha)
        color_conversion->schedule_clear = true;

    self->do_capture = true;
}
/* gsr_capture capture callback: draws the mapped pipewire frame (and, when
 * provided as metadata, the cursor on top of it) into the color conversion
 * target, then closes this frame's dmabuf fds.
 * Returns -1 when no frame should be drawn this tick, 0 otherwise. */
static int gsr_capture_portal_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    (void)color_conversion; /* NOTE(review): redundant — the parameter is used below */
    gsr_capture_portal *self = cap->priv;
    if(self->should_stop || !self->do_capture)
        return -1;

    const vec2i output_size = scale_keep_aspect_ratio(self->capture_size, capture_metadata->recording_size);
    const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);
    const vec2i actual_texture_size = {self->pipewire_data.texture_width, self->pipewire_data.texture_height};

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    // TODO: Handle region crop

    /* External-image formats are drawn from the OES texture, everything else from the 2D texture. */
    gsr_color_conversion_draw(color_conversion, self->pipewire_data.using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
        target_pos, output_size,
        (vec2i){self->pipewire_data.region.x, self->pipewire_data.region.y}, (vec2i){self->pipewire_data.region.width, self->pipewire_data.region.height}, actual_texture_size,
        gsr_monitor_rotation_to_rotation(self->pipewire_data.rotation), capture_metadata->flip, GSR_SOURCE_COLOR_RGB, self->pipewire_data.using_external_image);

    if(self->params.record_cursor && self->texture_map.cursor_texture_id > 0 && self->pipewire_data.cursor_region.width > 0) {
        /* Scale cursor position/size by the same factor the frame was scaled with. */
        const vec2d scale = {
            self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
            self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
        };

        const vec2i cursor_pos = {
            target_pos.x + (self->pipewire_data.cursor_region.x * scale.x),
            target_pos.y + (self->pipewire_data.cursor_region.y * scale.y)
        };

        /* Clip the cursor to the frame's output rectangle so it can't draw outside it. */
        self->params.egl->glEnable(GL_SCISSOR_TEST);
        self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);
        gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
            (vec2i){cursor_pos.x, cursor_pos.y},
            (vec2i){self->pipewire_data.cursor_region.width * scale.x, self->pipewire_data.cursor_region.height * scale.y},
            (vec2i){0, 0},
            (vec2i){self->pipewire_data.cursor_region.width, self->pipewire_data.cursor_region.height},
            (vec2i){self->pipewire_data.cursor_region.width, self->pipewire_data.cursor_region.height},
            gsr_monitor_rotation_to_rotation(self->pipewire_data.rotation), capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);
        self->params.egl->glDisable(GL_SCISSOR_TEST);
    }

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    gsr_capture_portal_cleanup_plane_fds(self);

    return 0;
}
/* Portal frames may arrive as EGL external images, so always report true. */
static bool gsr_capture_portal_uses_external_image(gsr_capture *cap) {
    (void)cap;
    const bool uses_external_image = true;
    return uses_external_image;
}
/* Reports whether capture should end; *err (if provided) says whether the
   stop is an error rather than a clean user cancel. */
static bool gsr_capture_portal_should_stop(gsr_capture *cap, bool *err) {
    const gsr_capture_portal *portal = cap->priv;
    if(err)
        *err = portal->stop_is_error;
    return portal->should_stop;
}
/* Forwards the damage query to the pipewire stream. */
static bool gsr_capture_portal_is_damaged(gsr_capture *cap) {
    gsr_capture_portal *portal = cap->priv;
    return gsr_pipewire_video_is_damaged(&portal->pipewire);
}
/* Forwards the damage reset to the pipewire stream. */
static void gsr_capture_portal_clear_damage(gsr_capture *cap) {
    gsr_capture_portal *portal = cap->priv;
    gsr_pipewire_video_clear_damage(&portal->pipewire);
}
/* Stops capture (when private state exists) and frees the capture object
   together with its private data. */
static void gsr_capture_portal_destroy(gsr_capture *cap) {
    gsr_capture_portal *portal = cap->priv;
    if(portal) {
        gsr_capture_portal_stop(portal);
        free(portal);
        cap->priv = NULL;
    }
    free(cap);
}
/* Allocates and initializes a desktop portal capture backend.
 * Returns NULL when params is NULL or allocation fails. All real setup
 * (dbus, pipewire, GL) is deferred to the start callback. */
gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params) {
    if(!params) {
        fprintf(stderr, "gsr error: gsr_capture_portal_create params is NULL\n");
        return NULL;
    }

    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
    if(!cap)
        return NULL;

    gsr_capture_portal *cap_portal = calloc(1, sizeof(gsr_capture_portal));
    if(!cap_portal) {
        free(cap);
        return NULL;
    }

    cap_portal->params = *params;

    *cap = (gsr_capture) {
        .start = gsr_capture_portal_start,
        .tick = NULL,
        .should_stop = gsr_capture_portal_should_stop,
        .capture_has_synchronous_task = gsr_capture_portal_capture_has_synchronous_task,
        .pre_capture = gsr_capture_portal_pre_capture,
        .capture = gsr_capture_portal_capture,
        .uses_external_image = gsr_capture_portal_uses_external_image,
        .is_damaged = gsr_capture_portal_is_damaged,
        .clear_damage = gsr_capture_portal_clear_damage,
        .destroy = gsr_capture_portal_destroy,
        .priv = cap_portal
    };
    return cap;
}

684
src/capture/v4l2.c Normal file
View File

@ -0,0 +1,684 @@
#include "../../include/capture/v4l2.h"
#include "../../include/color_conversion.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
#include <dlfcn.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include <linux/dma-buf.h>
#include <drm_fourcc.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>
/* libjpeg-turbo constants, mirrored here because the library is loaded at
   runtime with dlopen instead of compiling against its headers.
   NOTE(review): values must match turbojpeg.h — confirm on library updates. */
#define TJPF_RGB 0
#define TJPF_RGBA 7
#define TJFLAG_FASTDCT 2048

#define NUM_BUFFERS 2 /* v4l2 capture buffers / GL textures (double buffering) */
#define NUM_PBOS 2    /* pixel unpack buffers for streaming texture uploads */

/* Function pointer types for the dynamically loaded libturbojpeg symbols. */
typedef void* tjhandle;

typedef tjhandle (*FUNC_tjInitDecompress)(void);
typedef int (*FUNC_tjDestroy)(tjhandle handle);
typedef int (*FUNC_tjDecompressHeader2)(tjhandle handle,
    unsigned char *jpegBuf, unsigned long jpegSize,
    int *width, int *height, int *jpegSubsamp);
typedef int (*FUNC_tjDecompress2)(tjhandle handle, const unsigned char *jpegBuf,
    unsigned long jpegSize, unsigned char *dstBuf,
    int width, int pitch, int height, int pixelFormat,
    int flags);
typedef char* (*FUNC_tjGetErrorStr2)(tjhandle handle);

/* How captured v4l2 frames are imported: as dmabuf fds or via mmap'ed buffers. */
typedef enum {
    V4L2_BUFFER_TYPE_DMABUF,
    V4L2_BUFFER_TYPE_MMAP
} v4l2_buffer_type;

/* Private state for the v4l2 (camera/capture card) capture backend. */
typedef struct {
    gsr_capture_v4l2_params params;
    vec2i capture_size;                /* negotiated capture resolution */
    bool should_stop;
    bool stop_is_error;
    int fd;                            /* v4l2 device fd, 0 when closed */
    int dmabuf_fd[NUM_BUFFERS];        /* exported dmabuf fds, 0 when unused */
    EGLImage dma_image[NUM_BUFFERS];   /* EGL images created from the dmabufs */
    unsigned int texture_id[NUM_BUFFERS];
    unsigned int prev_texture_index;
    bool got_first_frame;
    void *dmabuf_map[NUM_BUFFERS];     /* mmap'ed buffer pointers (mmap buffer type) */
    size_t dmabuf_size[NUM_BUFFERS];
    unsigned int pbos[NUM_PBOS];       /* pixel unpack buffer objects */
    unsigned int pbo_index;
    v4l2_buffer_type buffer_type;
    /* libturbojpeg, loaded at runtime for mjpeg decoding */
    void *libturbojpeg_lib;
    FUNC_tjInitDecompress tjInitDecompress;
    FUNC_tjDestroy tjDestroy;
    FUNC_tjDecompressHeader2 tjDecompressHeader2;
    FUNC_tjDecompress2 tjDecompress2;
    FUNC_tjGetErrorStr2 tjGetErrorStr2;
    tjhandle jpeg_decompressor;
    double capture_start_time;
    bool yuyv_conversion_fallback;     /* NOTE(review): presumably a CPU fallback for yuyv conversion — confirm in the capture path */
} gsr_capture_v4l2;
/* ioctl wrapper that retries when the call is interrupted by a signal (EINTR). */
static int xioctl(int fd, unsigned long request, void *arg) {
    for(;;) {
        const int result = ioctl(fd, request, arg);
        if(result != -1 || errno != EINTR)
            return result;
    }
}
/* Releases everything the v4l2 capture owns: PBOs, textures, mmap'ed buffers,
 * EGL images, dmabuf fds, the device fd (after stopping streaming) and the
 * jpeg decompressor + libturbojpeg handle. Safe on partially initialized state. */
static void gsr_capture_v4l2_stop(gsr_capture_v4l2 *self) {
    self->params.egl->glDeleteBuffers(NUM_PBOS, self->pbos);
    for(int i = 0; i < NUM_PBOS; ++i) {
        self->pbos[i] = 0;
    }

    self->params.egl->glDeleteTextures(NUM_BUFFERS, self->texture_id);
    for(int i = 0; i < NUM_BUFFERS; ++i) {
        self->texture_id[i] = 0;
    }

    for(int i = 0; i < NUM_BUFFERS; ++i) {
        if(self->dmabuf_map[i]) {
            munmap(self->dmabuf_map[i], self->dmabuf_size[i]);
            self->dmabuf_map[i] = NULL;
        }

        if(self->dma_image[i]) {
            self->params.egl->eglDestroyImage(self->params.egl->egl_display, self->dma_image[i]);
            self->dma_image[i] = NULL;
        }

        if(self->dmabuf_fd[i] > 0) {
            close(self->dmabuf_fd[i]);
            self->dmabuf_fd[i] = 0;
        }
    }

    if(self->fd > 0) {
        /* Stop streaming before closing so the driver releases its buffers cleanly. */
        xioctl(self->fd, VIDIOC_STREAMOFF, &(enum v4l2_buf_type){V4L2_BUF_TYPE_VIDEO_CAPTURE});
        close(self->fd);
        self->fd = 0;
    }

    if(self->jpeg_decompressor) {
        self->tjDestroy(self->jpeg_decompressor);
        self->jpeg_decompressor = NULL;
    }

    if(self->libturbojpeg_lib) {
        dlclose(self->libturbojpeg_lib);
        self->libturbojpeg_lib = NULL;
    }
}
/* Resets the device's crop rectangle to the driver default. Devices without
 * cropping support (or other errors) are silently ignored — this is a
 * best-effort cleanup of state left over from other applications. */
static void gsr_capture_v4l2_reset_cropping(gsr_capture_v4l2 *self) {
    struct v4l2_cropcap cropcap = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE
    };

    if(xioctl(self->fd, VIDIOC_CROPCAP, &cropcap) == 0) {
        struct v4l2_crop crop = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .c = cropcap.defrect /* reset to default */
        };

        if(xioctl(self->fd, VIDIOC_S_CROP, &crop) == -1) {
            switch (errno) {
            case EINVAL:
                /* Cropping not supported. */
                break;
            default:
                /* Errors ignored. */
                break;
            }
        }
    } else {
        /* Errors ignored. */
    }
}
/* Enumerates the device's capture pixel formats and reports which of the
   formats gsr can consume (yuyv, mjpeg) are available. */
gsr_capture_v4l2_supported_pixfmts gsr_capture_v4l2_get_supported_pixfmts(int fd) {
    gsr_capture_v4l2_supported_pixfmts result = {0};

    struct v4l2_fmtdesc fmt = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE
    };

    for(fmt.index = 0; xioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0; ++fmt.index) {
        //fprintf(stderr, "fmt: %d, desc: %s, flags: %d\n", fmt.pixelformat, fmt.description, fmt.flags);
        if(fmt.pixelformat == V4L2_PIX_FMT_YUYV)
            result.yuyv = true;
        else if(fmt.pixelformat == V4L2_PIX_FMT_MJPEG)
            result.mjpeg = true;
    }

    return result;
}
/* Maps the gsr pixel format enum to the corresponding V4L2 fourcc.
   GSR_CAPTURE_V4L2_PIXFMT_AUTO must be resolved to a concrete format first
   (see gsr_capture_v4l2_validate_pixfmt); hitting it here is a programmer error. */
static uint32_t gsr_pixfmt_to_v4l2_pixfmt(gsr_capture_v4l2_pixfmt pixfmt) {
    if(pixfmt == GSR_CAPTURE_V4L2_PIXFMT_YUYV)
        return V4L2_PIX_FMT_YUYV;
    if(pixfmt == GSR_CAPTURE_V4L2_PIXFMT_MJPEG)
        return V4L2_PIX_FMT_MJPEG;

    assert(false);
    return V4L2_PIX_FMT_YUYV;
}
/* Check that the requested pixel format is actually supported by the device.
 * GSR_CAPTURE_V4L2_PIXFMT_AUTO is resolved in-place, preferring yuyv over
 * mjpeg. Returns false (after printing an error) when no usable format exists. */
static bool gsr_capture_v4l2_validate_pixfmt(gsr_capture_v4l2 *self, const gsr_capture_v4l2_supported_pixfmts supported_pixfmts) {
    if(self->params.pixfmt == GSR_CAPTURE_V4L2_PIXFMT_AUTO) {
        if(supported_pixfmts.yuyv) {
            self->params.pixfmt = GSR_CAPTURE_V4L2_PIXFMT_YUYV;
        } else if(supported_pixfmts.mjpeg) {
            self->params.pixfmt = GSR_CAPTURE_V4L2_PIXFMT_MJPEG;
        } else {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s doesn't support yuyv nor mjpeg. GPU Screen Recorder supports only yuyv and mjpeg at the moment. Report this as an issue, see: https://git.dec05eba.com/?p=about\n", self->params.device_path);
            return false;
        }
    } else if(self->params.pixfmt == GSR_CAPTURE_V4L2_PIXFMT_YUYV && !supported_pixfmts.yuyv) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s doesn't support yuyv. Try recording with -pixfmt mjpeg or -pixfmt auto instead\n", self->params.device_path);
        return false;
    } else if(self->params.pixfmt == GSR_CAPTURE_V4L2_PIXFMT_MJPEG && !supported_pixfmts.mjpeg) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s doesn't support mjpeg. Try recording with -pixfmt yuyv or -pixfmt auto instead\n", self->params.device_path);
        return false;
    }
    return true;
}
/* Create NUM_PBOS pixel unpack buffers, each sized for a width*height RGBA
 * image. They are used to stream decoded mjpeg frames to the GPU.
 * Returns false if any buffer object could not be created. */
static bool gsr_capture_v4l2_create_pbos(gsr_capture_v4l2 *self, int width, int height) {
    self->pbo_index = 0;
    self->params.egl->glGenBuffers(NUM_PBOS, self->pbos);

    const int buffer_bytes = width * height * 4; /* 4 bytes per pixel (RGBA) */
    for(int buf_index = 0; buf_index < NUM_PBOS; ++buf_index) {
        if(self->pbos[buf_index] == 0) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create_pbos: failed to create pixel buffer objects\n");
            return false;
        }
        self->params.egl->glBindBuffer(GL_PIXEL_UNPACK_BUFFER, self->pbos[buf_index]);
        self->params.egl->glBufferData(GL_PIXEL_UNPACK_BUFFER, buffer_bytes, 0, GL_DYNAMIC_DRAW);
    }

    self->params.egl->glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    return true;
}
/* Import the exported dmabuf capture buffers into OpenGL.
 *
 * yuyv: each dmabuf is wrapped in an EGLImage bound to a
 * GL_TEXTURE_EXTERNAL_OES texture (no CPU copy). If the driver rejects a
 * DRM_FORMAT_YUYV import, the same dmabuf is imported as DRM_FORMAT_RG88 and
 * yuyv_conversion_fallback is set so the yuyv->rgb conversion happens in the
 * shader instead.
 *
 * mjpeg: the dmabufs are mmap'ed read-only for CPU access and one RGBA
 * texture per buffer plus a set of PBOs is created; frames are jpeg-decoded
 * into the PBOs at capture time.
 *
 * Returns false on failure; partially created resources are released by the
 * caller via gsr_capture_v4l2_stop. */
static bool gsr_capture_v4l2_map_buffer(gsr_capture_v4l2 *self, const struct v4l2_format *fmt) {
    switch(self->params.pixfmt) {
        case GSR_CAPTURE_V4L2_PIXFMT_AUTO: {
            /* AUTO is resolved to a concrete format by gsr_capture_v4l2_validate_pixfmt before this runs */
            assert(false);
            return false;
        }
        case GSR_CAPTURE_V4L2_PIXFMT_YUYV: {
            self->params.egl->glGenTextures(NUM_BUFFERS, self->texture_id);
            for(int i = 0; i < NUM_BUFFERS; ++i) {
                self->dma_image[i] = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, (intptr_t[]) {
                    EGL_WIDTH, fmt->fmt.pix.width,
                    EGL_HEIGHT, fmt->fmt.pix.height,
                    EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_YUYV,
                    EGL_DMA_BUF_PLANE0_FD_EXT, self->dmabuf_fd[i],
                    EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
                    EGL_DMA_BUF_PLANE0_PITCH_EXT, fmt->fmt.pix.bytesperline,
                    EGL_NONE
                });

                if(!self->dma_image[i]) {
                    /* Not every driver can sample YUYV directly; import the same data
                     * as two-channel RG88 and convert yuyv->rgb in the shader instead */
                    self->yuyv_conversion_fallback = true;
                    self->dma_image[i] = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, (intptr_t[]) {
                        EGL_WIDTH, fmt->fmt.pix.width,
                        EGL_HEIGHT, fmt->fmt.pix.height,
                        EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_RG88,
                        EGL_DMA_BUF_PLANE0_FD_EXT, self->dmabuf_fd[i],
                        EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
                        EGL_DMA_BUF_PLANE0_PITCH_EXT, fmt->fmt.pix.bytesperline,
                        EGL_NONE
                    });

                    if(!self->dma_image[i]) {
                        fprintf(stderr, "gsr error: gsr_capture_v4l2_map_buffer: eglCreateImage failed, error: %d\n", self->params.egl->eglGetError());
                        return false;
                    }
                }

                self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->texture_id[i]);
                self->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, self->dma_image[i]);
                self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);

                if(self->texture_id[i] == 0) {
                    fprintf(stderr, "gsr error: gsr_capture_v4l2_map_buffer: failed to create texture\n");
                    return false;
                }
            }
            self->buffer_type = V4L2_BUFFER_TYPE_DMABUF;
            break;
        }
        case GSR_CAPTURE_V4L2_PIXFMT_MJPEG: {
            for(int i = 0; i < NUM_BUFFERS; ++i) {
                self->dmabuf_size[i] = fmt->fmt.pix.sizeimage;
                self->dmabuf_map[i] = mmap(NULL, fmt->fmt.pix.sizeimage, PROT_READ, MAP_SHARED, self->dmabuf_fd[i], 0);
                if(self->dmabuf_map[i] == MAP_FAILED) {
                    /* Reset to NULL: the cleanup path munmaps every non-NULL entry,
                     * and MAP_FAILED ((void*)-1) is non-NULL so it must not be left behind */
                    self->dmabuf_map[i] = NULL;
                    fprintf(stderr, "gsr error: gsr_capture_v4l2_map_buffer: mmap failed, error: %s\n", strerror(errno));
                    return false;
                }

                // GL_RGBA is intentionally used here instead of GL_RGB, because the performance is much better when using glTexSubImage2D (22% cpu usage compared to 38% cpu usage)
                self->texture_id[i] = gl_create_texture(self->params.egl, fmt->fmt.pix.width, fmt->fmt.pix.height, GL_RGBA8, GL_RGBA, GL_LINEAR);
                if(self->texture_id[i] == 0) {
                    fprintf(stderr, "gsr error: gsr_capture_v4l2_map_buffer: failed to create texture\n");
                    return false;
                }
            }

            if(!gsr_capture_v4l2_create_pbos(self, fmt->fmt.pix.width, fmt->fmt.pix.height))
                return false;

            self->buffer_type = V4L2_BUFFER_TYPE_MMAP;
            break;
        }
    }
    return true;
}
/* Open and configure the v4l2 device for streaming capture.
 *
 * Flow: open the device non-blocking, verify it supports video capture and
 * streaming i/o, reset cropping to the driver default, resolve/validate the
 * pixel format (loading libturbojpeg at runtime when mjpeg decoding is
 * needed), negotiate the capture format, request NUM_BUFFERS mmap buffers,
 * export each as a dmabuf, import them into OpenGL, queue all buffers and
 * start streaming.
 *
 * Returns 0 on success, -1 on failure. Partially acquired resources (fd,
 * dmabufs, libturbojpeg handle) are released by the caller via
 * gsr_capture_v4l2_stop. */
static int gsr_capture_v4l2_setup(gsr_capture_v4l2 *self) {
    // O_NONBLOCK so VIDIOC_DQBUF in the capture path returns immediately when no frame is ready
    self->fd = open(self->params.device_path, O_RDWR | O_NONBLOCK);
    if(self->fd < 0) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: failed to open %s, error: %s\n", self->params.device_path, strerror(errno));
        return -1;
    }

    struct v4l2_capability cap = {0};
    if(xioctl(self->fd, VIDIOC_QUERYCAP, &cap) == -1) {
        // EINVAL from QUERYCAP means the fd isn't a v4l2 device at all
        if(EINVAL == errno) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s isn't a v4l2 device\n", self->params.device_path);
            return -1;
        } else {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: VIDIOC_QUERYCAP failed, error: %s\n", strerror(errno));
            return -1;
        }
    }

    if(!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s isn't a video capture device\n", self->params.device_path);
        return -1;
    }

    if(!(cap.capabilities & V4L2_CAP_STREAMING)) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: %s doesn't support streaming i/o\n", self->params.device_path);
        return -1;
    }

    gsr_capture_v4l2_reset_cropping(self);

    const gsr_capture_v4l2_supported_pixfmts supported_pixfmts = gsr_capture_v4l2_get_supported_pixfmts(self->fd);
    if(!gsr_capture_v4l2_validate_pixfmt(self, supported_pixfmts))
        return -1;

    // mjpeg frames are decoded on the CPU with libturbojpeg, which is loaded
    // at runtime so it's only required when actually capturing in mjpeg
    if(self->params.pixfmt == GSR_CAPTURE_V4L2_PIXFMT_MJPEG) {
        dlerror(); /* clear */
        self->libturbojpeg_lib = dlopen("libturbojpeg.so.0", RTLD_LAZY);
        if(!self->libturbojpeg_lib) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: failed to load libturbojpeg.so.0 which is required for camera mjpeg capture, error: %s\n", dlerror());
            return -1;
        }

        self->tjInitDecompress = (FUNC_tjInitDecompress)dlsym(self->libturbojpeg_lib, "tjInitDecompress");
        self->tjDestroy = (FUNC_tjDestroy)dlsym(self->libturbojpeg_lib, "tjDestroy");
        self->tjDecompressHeader2 = (FUNC_tjDecompressHeader2)dlsym(self->libturbojpeg_lib, "tjDecompressHeader2");
        self->tjDecompress2 = (FUNC_tjDecompress2)dlsym(self->libturbojpeg_lib, "tjDecompress2");
        self->tjGetErrorStr2 = (FUNC_tjGetErrorStr2)dlsym(self->libturbojpeg_lib, "tjGetErrorStr2");
        if(!self->tjInitDecompress || !self->tjDestroy || !self->tjDecompressHeader2 || !self->tjDecompress2 || !self->tjGetErrorStr2) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: libturbojpeg.so.0 is missing functions. The libturbojpeg version installed on your system might be outdated\n");
            return -1;
        }

        self->jpeg_decompressor = self->tjInitDecompress();
        if(!self->jpeg_decompressor) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: failed to create jpeg decompressor\n");
            return -1;
        }
    }

    // Only the pixel format is requested; the driver picks width/height.
    // Drivers may silently change the format, so verify it afterwards.
    const uint32_t v4l2_pixfmt = gsr_pixfmt_to_v4l2_pixfmt(self->params.pixfmt);
    struct v4l2_format fmt = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .fmt.pix.pixelformat = v4l2_pixfmt
    };
    if(xioctl(self->fd, VIDIOC_S_FMT, &fmt) == -1) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: VIDIOC_S_FMT failed, error: %s\n", strerror(errno));
        return -1;
    }

    if(fmt.fmt.pix.pixelformat != v4l2_pixfmt) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: pixel format isn't as requested (got pixel format: %u, requested: %u), error: %s\n", fmt.fmt.pix.pixelformat, v4l2_pixfmt, strerror(errno));
        return -1;
    }

    self->capture_size.x = fmt.fmt.pix.width;
    self->capture_size.y = fmt.fmt.pix.height;

    struct v4l2_requestbuffers reqbuf = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .memory = V4L2_MEMORY_MMAP,
        .count = NUM_BUFFERS
    };
    if(xioctl(self->fd, VIDIOC_REQBUFS, &reqbuf) == -1) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: VIDIOC_REQBUFS failed, error: %s\n", strerror(errno));
        return -1;
    }

    // Export every driver buffer as a dmabuf fd so it can either be imported
    // into EGL directly (yuyv) or mmap'ed for CPU jpeg decoding (mjpeg)
    for(int i = 0; i < NUM_BUFFERS; ++i) {
        struct v4l2_exportbuffer expbuf = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .index = i,
            .flags = O_RDONLY
        };
        if(xioctl(self->fd, VIDIOC_EXPBUF, &expbuf) == -1) {
            fprintf(stderr, "gsr error: gsr_capture_v4l2_create: VIDIOC_EXPBUF failed, error: %s\n", strerror(errno));
            return -1;
        }
        self->dmabuf_fd[i] = expbuf.fd;
    }

    if(!gsr_capture_v4l2_map_buffer(self, &fmt))
        return -1;

    // Queue all buffers up-front so the driver can start filling them immediately
    for(int i = 0; i < NUM_BUFFERS; ++i) {
        struct v4l2_buffer buf = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .index = i,
            .memory = V4L2_MEMORY_MMAP
        };
        xioctl(self->fd, VIDIOC_QBUF, &buf);
    }

    if(xioctl(self->fd, VIDIOC_STREAMON, &(enum v4l2_buf_type){V4L2_BUF_TYPE_VIDEO_CAPTURE})) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_create: VIDIOC_STREAMON failed, error: %s\n", strerror(errno));
        return -1;
    }

    fprintf(stderr, "gsr info: gsr_capture_v4l2_create: waiting for camera %s to be ready\n", self->params.device_path);
    return 0;
}
/* Start camera capture: set up the v4l2 device, pick the output video size
 * (native camera size unless an output resolution was requested) and start
 * the first-frame timeout clock. Returns 0 on success. */
static int gsr_capture_v4l2_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_v4l2 *self = cap->priv;

    const int result = gsr_capture_v4l2_setup(self);
    if(result != 0) {
        gsr_capture_v4l2_stop(self);
        return result;
    }

    const bool use_camera_size = self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0;
    if(use_camera_size) {
        capture_metadata->video_size = self->capture_size;
    } else {
        // Shrink the requested resolution to match the camera's aspect ratio
        self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
        capture_metadata->video_size = self->params.output_resolution;
    }

    self->capture_start_time = clock_get_monotonic_seconds();
    return 0;
}
/* Watchdog: if the camera hasn't delivered any frame within 5 seconds of
 * starting, flag the capture to stop with an error. */
static void gsr_capture_v4l2_tick(gsr_capture *cap) {
    gsr_capture_v4l2 *self = cap->priv;
    if(self->got_first_frame || self->should_stop)
        return;

    const double timeout_sec = 5.0;
    const double elapsed_sec = clock_get_monotonic_seconds() - self->capture_start_time;
    if(elapsed_sec >= timeout_sec) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_capture: didn't receive camera data in %f seconds\n", timeout_sec);
        self->should_stop = true;
        self->stop_is_error = true;
    }
}
/* Decode one mjpeg frame into the texture belonging to |buf|'s buffer index.
 *
 * Uploads are pipelined through a ring of PBOs: glTexSubImage2D sources the
 * current PBO (filled by the previous call) while this frame's jpeg is
 * decompressed into the next PBO. This adds one frame of latency but keeps
 * the CPU->GPU transfer asynchronous. Decode errors are logged and the frame
 * is dropped. */
static void gsr_capture_v4l2_decode_jpeg_to_texture(gsr_capture_v4l2 *self, const struct v4l2_buffer *buf) {
    int jpeg_subsamp = 0;
    int jpeg_width = 0;
    int jpeg_height = 0;
    if(self->tjDecompressHeader2(self->jpeg_decompressor, self->dmabuf_map[buf->index], buf->bytesused, &jpeg_width, &jpeg_height, &jpeg_subsamp) != 0) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_capture: failed to decompress camera jpeg header data, error: %s\n", self->tjGetErrorStr2(self->jpeg_decompressor));
        return;
    }

    // The textures were allocated at the negotiated capture size; drop frames of any other size
    if(jpeg_width != self->capture_size.x || jpeg_height != self->capture_size.y) {
        fprintf(stderr, "gsr error: gsr_capture_v4l2_capture: got jpeg data of incorrect dimensions. Expected %dx%d, got %dx%d\n", self->capture_size.x, self->capture_size.y, jpeg_width, jpeg_height);
        return;
    }

    self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_id[buf->index]);

    // Advance the ring: pbos[pbo_index] now holds the pixels decoded on the previous call
    self->pbo_index = (self->pbo_index + 1) % NUM_PBOS;
    const unsigned int next_pbo_index = (self->pbo_index + 1) % NUM_PBOS;

    self->params.egl->glBindBuffer(GL_PIXEL_UNPACK_BUFFER, self->pbos[self->pbo_index]);
    // NULL data pointer: pixels are sourced from the bound pixel unpack buffer
    self->params.egl->glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, self->capture_size.x, self->capture_size.y, GL_RGBA, GL_UNSIGNED_BYTE, NULL);

    self->params.egl->glBindBuffer(GL_PIXEL_UNPACK_BUFFER, self->pbos[next_pbo_index]);
    // Re-specify (orphan) the next PBO so mapping it doesn't wait on an in-flight transfer
    self->params.egl->glBufferData(GL_PIXEL_UNPACK_BUFFER, self->capture_size.x * self->capture_size.y * 4, 0, GL_DYNAMIC_DRAW);

    void *mapped_buffer = self->params.egl->glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, self->capture_size.x * self->capture_size.y * 4, GL_MAP_WRITE_BIT);
    if(mapped_buffer) {
        // Decompress this frame's jpeg directly into the mapped PBO; it gets uploaded on the next call
        if(self->tjDecompress2(self->jpeg_decompressor, self->dmabuf_map[buf->index], buf->bytesused, mapped_buffer, jpeg_width, 0, jpeg_height, TJPF_RGBA, TJFLAG_FASTDCT) != 0)
            fprintf(stderr, "gsr error: gsr_capture_v4l2_capture: failed to decompress camera jpeg data, error: %s\n", self->tjGetErrorStr2(self->jpeg_decompressor));
        self->params.egl->glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
    }

    self->params.egl->glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
}
/* Dequeue the latest camera frame, upload/convert it and draw it into the
 * color conversion target. If no new (valid) frame is available the
 * previously captured frame is drawn again. Returns 0 once at least one
 * frame has been received, -1 before that. */
static int gsr_capture_v4l2_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_v4l2 *self = cap->priv;

    struct v4l2_buffer buf = {
        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .memory = V4L2_MEMORY_MMAP
    };
    /* The device is opened with O_NONBLOCK, so VIDIOC_DQBUF fails (EAGAIN)
       when no frame is ready yet. Only requeue a buffer we actually dequeued;
       otherwise the zero-initialized struct would requeue buffer index 0. */
    const bool dequeued = xioctl(self->fd, VIDIOC_DQBUF, &buf) == 0;

    /* Fall back to the previous frame's texture unless we got a valid new frame */
    unsigned int texture_index = self->prev_texture_index;
    if(dequeued && buf.bytesused > 0 && !(buf.flags & V4L2_BUF_FLAG_ERROR)) {
        if(!self->got_first_frame)
            fprintf(stderr, "gsr info: gsr_capture_v4l2_capture: camera %s is now ready\n", self->params.device_path);
        self->got_first_frame = true;

        switch(self->buffer_type) {
            case V4L2_BUFFER_TYPE_DMABUF: {
                /* Nothing to do: the dmabuf is already bound to the texture via an EGLImage */
                break;
            }
            case V4L2_BUFFER_TYPE_MMAP: {
                gsr_capture_v4l2_decode_jpeg_to_texture(self, &buf);
                break;
            }
        }

        texture_index = buf.index;
        self->prev_texture_index = buf.index;
    }

    /* Hand the buffer back to the driver so it can be filled again */
    if(dequeued)
        xioctl(self->fd, VIDIOC_QBUF, &buf);

    const vec2i output_size = scale_keep_aspect_ratio(self->capture_size, capture_metadata->recording_size);
    const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);

    self->params.egl->glFlush();
    // NOTE(review): full glFinish on NVIDIA — presumably needed for correct sync there; confirm
    if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA)
        self->params.egl->glFinish();

    if(self->buffer_type == V4L2_BUFFER_TYPE_DMABUF) {
        /* External (OES) texture; select the shader-side yuyv conversion when the
           RG88 fallback import was used */
        gsr_color_conversion_draw(color_conversion, self->texture_id[texture_index],
            target_pos, output_size,
            (vec2i){0, 0}, self->capture_size, self->capture_size,
            GSR_ROT_0, capture_metadata->flip, self->yuyv_conversion_fallback ? GSR_SOURCE_COLOR_YUYV : GSR_SOURCE_COLOR_RGB, true);
    } else {
        gsr_color_conversion_draw(color_conversion, self->texture_id[texture_index],
            target_pos, output_size,
            (vec2i){0, 0}, self->capture_size, self->capture_size,
            GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);
    }

    return self->got_first_frame ? 0 : -1;
}
/* The yuyv path binds EGLImages to GL_TEXTURE_EXTERNAL_OES textures, so this
 * capture reports that it may produce external images. */
static bool gsr_capture_v4l2_uses_external_image(gsr_capture *cap) {
    (void)cap; /* the answer doesn't depend on the instance */
    return true;
}
/* Report whether capture should end; |err| (optional) receives whether the
 * stop was caused by an error. */
static bool gsr_capture_v4l2_should_stop(gsr_capture *cap, bool *err) {
    const gsr_capture_v4l2 *self = cap->priv;
    if(err != NULL)
        *err = self->stop_is_error;
    return self->should_stop;
}
/* A new frame counts as "damage": the v4l2 fd becomes readable when a filled
 * buffer is ready to be dequeued. Non-blocking poll with a zero timeout. */
static bool gsr_capture_v4l2_is_damaged(gsr_capture *cap) {
    gsr_capture_v4l2 *self = cap->priv;
    struct pollfd poll_data = {
        .fd = self->fd,
        .events = POLLIN,
        .revents = 0
    };
    if(poll(&poll_data, 1, 0) <= 0)
        return false;
    return (poll_data.revents & POLLIN) != 0;
}
/* No explicit damage state is kept for v4l2 capture, so there is nothing to reset. */
static void gsr_capture_v4l2_clear_damage(gsr_capture *cap) {
    (void)cap;
}
/* Tear down the capture: release all v4l2/GL/libturbojpeg resources and free
 * both the private state and the capture object itself. */
static void gsr_capture_v4l2_destroy(gsr_capture *cap) {
    gsr_capture_v4l2 *self = cap->priv;
    if(self) {
        gsr_capture_v4l2_stop(self);
        free(self);
        cap->priv = NULL;
    }
    free(cap);
}
gsr_capture* gsr_capture_v4l2_create(const gsr_capture_v4l2_params *params) {
if(!params) {
fprintf(stderr, "gsr error: gsr_capture_v4l2_create params is NULL\n");
return NULL;
}
gsr_capture *cap = calloc(1, sizeof(gsr_capture));
if(!cap)
return NULL;
gsr_capture_v4l2 *cap_camera = calloc(1, sizeof(gsr_capture_v4l2));
if(!cap_camera) {
free(cap);
return NULL;
}
cap_camera->params = *params;
*cap = (gsr_capture) {
.start = gsr_capture_v4l2_start,
.tick = gsr_capture_v4l2_tick,
.should_stop = gsr_capture_v4l2_should_stop,
.capture = gsr_capture_v4l2_capture,
.uses_external_image = gsr_capture_v4l2_uses_external_image,
.is_damaged = gsr_capture_v4l2_is_damaged,
.clear_damage = gsr_capture_v4l2_clear_damage,
.destroy = gsr_capture_v4l2_destroy,
.priv = cap_camera
};
return cap;
}
/* Probe /dev/video0 .. /dev/video7 and invoke |callback| for every usable
 * video capture device, passing its supported pixel formats and current
 * capture resolution. mjpeg support is only reported when libturbojpeg is
 * available, since it's required for mjpeg decoding. */
void gsr_capture_v4l2_list_devices(v4l2_devices_query_callback callback, void *userdata) {
    /* Probe once whether libturbojpeg can be loaded; don't keep it loaded */
    void *libturbojpeg_lib = dlopen("libturbojpeg.so.0", RTLD_LAZY);
    const bool has_libturbojpeg_lib = libturbojpeg_lib != NULL;
    if(libturbojpeg_lib)
        dlclose(libturbojpeg_lib);

    for(int device_index = 0; device_index < 8; ++device_index) {
        char v4l2_device_path[128];
        snprintf(v4l2_device_path, sizeof(v4l2_device_path), "/dev/video%d", device_index);

        const int fd = open(v4l2_device_path, O_RDWR | O_NONBLOCK);
        if(fd < 0)
            continue;

        struct v4l2_capability cap = {0};
        struct v4l2_format fmt = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE
        };
        /* Short-circuits in the same order as the original goto chain */
        const bool usable =
            xioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
            (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) &&
            (cap.capabilities & V4L2_CAP_STREAMING) &&
            xioctl(fd, VIDIOC_G_FMT, &fmt) == 0;

        if(usable) {
            gsr_capture_v4l2_supported_pixfmts supported_pixfmts = gsr_capture_v4l2_get_supported_pixfmts(fd);
            if(!has_libturbojpeg_lib)
                supported_pixfmts.mjpeg = false;

            if(supported_pixfmts.yuyv || supported_pixfmts.mjpeg)
                callback(v4l2_device_path, supported_pixfmts, (vec2i){ fmt.fmt.pix.width, fmt.fmt.pix.height }, userdata);
        }

        close(fd);
    }
}

324
src/capture/xcomposite.c Normal file
View File

@ -0,0 +1,324 @@
#include "../../include/capture/xcomposite.h"
#include "../../include/window_texture.h"
#include "../../include/utils.h"
#include "../../include/cursor.h"
#include "../../include/color_conversion.h"
#include "../../include/window/window.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <X11/Xlib.h>
/* Private state for the xcomposite window capture. */
typedef struct {
    gsr_capture_xcomposite_params params;
    Display *display;                   // X11 display, obtained from the window backend at create time

    bool should_stop;                   // set when recording should end (e.g. captured window destroyed)
    bool stop_is_error;                 // whether the stop was caused by an error
    bool window_resized;                // a resize/expose was seen; texture refresh pending (debounced in tick)
    bool follow_focused_initialized;    // follow-focused mode has picked its first target window
    bool init_new_window;               // re-query the focused window on the next tick

    Window window;                      // the X window currently being captured
    vec2i window_pos;                   // window position on screen (used to place the cursor)
    vec2i window_size;                  // last known window size from ConfigureNotify
    vec2i texture_size;                 // size of the captured window texture
    double window_resize_timer;         // timestamp of the last resize event (debounce reference)

    WindowTexture window_texture;       // pixmap-backed GL texture of the captured window
    Atom net_active_window_atom;        // _NET_ACTIVE_WINDOW atom (follow-focused mode only)

    bool clear_background;              // request one background clear before the next draw
} gsr_capture_xcomposite;
/* Release the pixmap-backed window texture held by this capture. */
static void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
    window_texture_deinit(&self->window_texture);
}
/* Return the larger of two ints. */
static int max_int(int a, int b) {
    if(a > b)
        return a;
    return b;
}
/* Read _NET_ACTIVE_WINDOW from the root window to find the currently focused
 * window. Returns None when the property can't be read. */
static Window get_focused_window(Display *display, Atom net_active_window_atom) {
    Atom type;
    int format = 0;
    unsigned long num_items = 0;
    unsigned long bytes_after = 0;
    unsigned char *properties = NULL;
    Window focused_window = None;
    if(XGetWindowProperty(display, DefaultRootWindow(display), net_active_window_atom, 0, 1024, False, AnyPropertyType, &type, &format, &num_items, &bytes_after, &properties) == Success && properties) {
        /* The property data is an array of longs; the first entry is the window id */
        focused_window = *(unsigned long*)properties;
        XFree(properties);
    }
    return focused_window;
}
/* Start window capture: resolve the target window (the focused window in
 * follow-focused mode, otherwise the configured one), read its geometry,
 * subscribe to the relevant X events and create the window texture.
 * In follow-focused mode most failures are tolerated since the target can
 * change to a valid window later. Returns 0 on success, -1 on failure. */
static int gsr_capture_xcomposite_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_xcomposite *self = cap->priv;

    if(self->params.follow_focused) {
        self->net_active_window_atom = XInternAtom(self->display, "_NET_ACTIVE_WINDOW", False);
        if(!self->net_active_window_atom) {
            fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: failed to get _NET_ACTIVE_WINDOW atom\n");
            return -1;
        }
        self->window = get_focused_window(self->display, self->net_active_window_atom);
    } else {
        self->window = self->params.window;
    }

    /* TODO: Do these in tick, and allow error if follow_focused */

    /* Zero-initialize so the geometry reads below are defined even when
     * XGetWindowAttributes fails and follow_focused lets us continue
     * (previously attr was read uninitialized in that case). */
    XWindowAttributes attr = {0};
    if(!XGetWindowAttributes(self->display, self->window, &attr) && !self->params.follow_focused) {
        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: invalid window id: %lu\n", self->window);
        return -1;
    }

    self->window_pos.x = attr.x;
    self->window_pos.y = attr.y;
    self->window_size.x = max_int(attr.width, 0);
    self->window_size.y = max_int(attr.height, 0);

    // Watch the root window for _NET_ACTIVE_WINDOW changes in follow-focused mode
    if(self->params.follow_focused)
        XSelectInput(self->display, DefaultRootWindow(self->display), PropertyChangeMask);

    // TODO: Get select and add these on top of it and then restore at the end. Also do the same in other xcomposite
    XSelectInput(self->display, self->window, StructureNotifyMask | ExposureMask);

    if(window_texture_init(&self->window_texture, self->display, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to get window texture for window %ld\n", (long)self->window);
        return -1;
    }

    self->texture_size.x = self->window_texture.window_width;
    self->texture_size.y = self->window_texture.window_height;

    // Use the window's own size unless an explicit output resolution was requested
    if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) {
        capture_metadata->video_size = self->texture_size;
    } else {
        capture_metadata->video_size = self->params.output_resolution;
    }

    self->window_resize_timer = clock_get_monotonic_seconds();
    return 0;
}
/* Per-frame bookkeeping. Handles two deferred operations:
 *  - switching to a newly focused window (follow-focused mode): re-selects
 *    X events and recreates the window texture for the new window;
 *  - window resizes: debounced by window_resize_timeout so the texture is
 *    only recreated once resizing has settled. */
static void gsr_capture_xcomposite_tick(gsr_capture *cap) {
    gsr_capture_xcomposite *self = cap->priv;

    // Follow-focused mode picks its initial target window on the first tick
    if(self->params.follow_focused && !self->follow_focused_initialized) {
        self->init_new_window = true;
    }

    if(self->init_new_window) {
        self->init_new_window = false;
        Window focused_window = get_focused_window(self->display, self->net_active_window_atom);
        if(focused_window != self->window || !self->follow_focused_initialized) {
            self->follow_focused_initialized = true;

            // Stop listening on the old window, start listening on the new one
            XSelectInput(self->display, self->window, 0);
            self->window = focused_window;
            XSelectInput(self->display, self->window, StructureNotifyMask | ExposureMask);

            /* Zero-initialize so x/y (not just width/height) are defined when
             * XGetWindowAttributes fails; previously only width/height were
             * pre-set and attr.x/attr.y were read uninitialized. */
            XWindowAttributes attr = {0};
            if(!XGetWindowAttributes(self->display, self->window, &attr))
                fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick failed: invalid window id: %lu\n", self->window);

            self->window_pos.x = attr.x;
            self->window_pos.y = attr.y;
            self->window_size.x = max_int(attr.width, 0);
            self->window_size.y = max_int(attr.height, 0);

            window_texture_deinit(&self->window_texture);
            window_texture_init(&self->window_texture, self->display, self->window, self->params.egl); // TODO: Do not do the below window_texture_on_resize after this

            self->texture_size.x = self->window_texture.window_width;
            self->texture_size.y = self->window_texture.window_height;

            self->window_resized = false;
            self->clear_background = true;
        }
    }

    const double window_resize_timeout = 1.0; // 1 second
    if(self->window_resized && clock_get_monotonic_seconds() - self->window_resize_timer >= window_resize_timeout) {
        self->window_resized = false;
        // Rebind the window pixmap at its new size
        if(window_texture_on_resize(&self->window_texture) != 0) {
            fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: window_texture_on_resize failed\n");
            //self->should_stop = true;
            //self->stop_is_error = true;
            return;
        }
        self->texture_size.x = self->window_texture.window_width;
        self->texture_size.y = self->window_texture.window_height;
        self->clear_background = true;
    }
}
/* React to X events relevant to the captured window. Events arrive from the
 * windows this capture selected input on: the captured window
 * (StructureNotifyMask | ExposureMask) and, in follow-focused mode, the root
 * window (PropertyChangeMask). */
static void gsr_capture_xcomposite_on_event(gsr_capture *cap, gsr_egl *egl) {
    gsr_capture_xcomposite *self = cap->priv;
    XEvent *xev = gsr_window_get_event_data(egl->window);
    switch(xev->type) {
        case DestroyNotify: {
            /* Window died (when not following focused window), so we stop recording */
            if(!self->params.follow_focused && xev->xdestroywindow.window == self->window) {
                self->should_stop = true;
                self->stop_is_error = false;
            }
            break;
        }
        case Expose: {
            /* Requires window texture recreate */
            if(xev->xexpose.count == 0 && xev->xexpose.window == self->window) {
                self->window_resize_timer = clock_get_monotonic_seconds();
                self->window_resized = true;
            }
            break;
        }
        case ConfigureNotify: {
            // NOTE(review): position is updated without checking xconfigure.window;
            // presumably fine because structure events are only selected on
            // self->window — confirm no other source delivers ConfigureNotify here
            self->window_pos.x = xev->xconfigure.x;
            self->window_pos.y = xev->xconfigure.y;

            /* Window resized */
            if(xev->xconfigure.window == self->window && (xev->xconfigure.width != self->window_size.x || xev->xconfigure.height != self->window_size.y)) {
                self->window_size.x = max_int(xev->xconfigure.width, 0);
                self->window_size.y = max_int(xev->xconfigure.height, 0);
                // Restart the debounce timer; the texture is recreated in tick once resizing settles
                self->window_resize_timer = clock_get_monotonic_seconds();
                self->window_resized = true;
            }
            break;
        }
        case PropertyNotify: {
            /* Focused window changed */
            if(self->params.follow_focused && xev->xproperty.atom == self->net_active_window_atom) {
                self->init_new_window = true;
            }
            break;
        }
    }
}
/* Report whether capture should end; |err| (optional) receives whether the
 * stop was caused by an error (always false while capture continues). */
static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
    const gsr_capture_xcomposite *self = cap->priv;
    if(err)
        *err = self->should_stop && self->stop_is_error;
    return self->should_stop;
}
/* Request a one-time background clear before the next draw when the captured
 * window changed or resized, so pixels from the old size don't linger. */
static void gsr_capture_xcomposite_pre_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    (void)capture_metadata;
    gsr_capture_xcomposite *self = cap->priv;
    if(!self->clear_background)
        return;
    self->clear_background = false;
    color_conversion->schedule_clear = true;
}
/* Draw the captured window (and optionally the cursor) into the color
 * conversion target, scaled to fit the recording size while preserving the
 * window's aspect ratio. Always returns 0. */
static int gsr_capture_xcomposite_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_xcomposite *self = cap->priv;

    const vec2i output_size = scale_keep_aspect_ratio(self->texture_size, capture_metadata->recording_size);
    const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
        target_pos, output_size,
        (vec2i){0, 0}, self->texture_size, self->texture_size,
        GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);

    if(self->params.record_cursor && self->params.cursor->visible) {
        // Scale factor from window pixels to output pixels (guard against a 0-sized texture)
        const vec2d scale = {
            self->texture_size.x == 0 ? 0 : (double)output_size.x / (double)self->texture_size.x,
            self->texture_size.y == 0 ? 0 : (double)output_size.y / (double)self->texture_size.y
        };

        // Make the cursor position relative to the window (it's offset by window_pos),
        // apply the hotspot offset and scale into output space
        const vec2i cursor_pos = {
            target_pos.x + (self->params.cursor->position.x - self->params.cursor->hotspot.x - self->window_pos.x) * scale.x,
            target_pos.y + (self->params.cursor->position.y - self->params.cursor->hotspot.y - self->window_pos.y) * scale.y
        };

        // Clip the cursor to the window's rectangle in the output
        self->params.egl->glEnable(GL_SCISSOR_TEST);
        self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);

        gsr_color_conversion_draw(color_conversion, self->params.cursor->texture_id,
            cursor_pos, (vec2i){self->params.cursor->size.x * scale.x, self->params.cursor->size.y * scale.y},
            (vec2i){0, 0}, self->params.cursor->size, self->params.cursor->size,
            GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);

        self->params.egl->glDisable(GL_SCISSOR_TEST);
    }

    //self->params.egl->glFlush();
    //self->params.egl->glFinish();

    return 0;
}
/* Expose the id of the X window currently being captured (this changes over
 * time in follow-focused mode). */
static uint64_t gsr_capture_xcomposite_get_window_id(gsr_capture *cap) {
    const gsr_capture_xcomposite *self = cap->priv;
    return (uint64_t)self->window;
}
/* Tear down the capture: release the window texture, then free the private
 * state and the capture object itself. */
static void gsr_capture_xcomposite_destroy(gsr_capture *cap) {
    gsr_capture_xcomposite *self = cap->priv;
    if(self) {
        gsr_capture_xcomposite_stop(self);
        free(self);
        cap->priv = NULL;
    }
    free(cap);
}
gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params) {
if(!params) {
fprintf(stderr, "gsr error: gsr_capture_xcomposite_create params is NULL\n");
return NULL;
}
gsr_capture *cap = calloc(1, sizeof(gsr_capture));
if(!cap)
return NULL;
gsr_capture_xcomposite *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite));
if(!cap_xcomp) {
free(cap);
return NULL;
}
cap_xcomp->params = *params;
cap_xcomp->display = gsr_window_get_display(params->egl->window);
*cap = (gsr_capture) {
.start = gsr_capture_xcomposite_start,
.on_event = gsr_capture_xcomposite_on_event,
.tick = gsr_capture_xcomposite_tick,
.should_stop = gsr_capture_xcomposite_should_stop,
.pre_capture = gsr_capture_xcomposite_pre_capture,
.capture = gsr_capture_xcomposite_capture,
.uses_external_image = NULL,
.get_window_id = gsr_capture_xcomposite_get_window_id,
.destroy = gsr_capture_xcomposite_destroy,
.priv = cap_xcomp
};
return cap;
}

226
src/capture/ximage.c Normal file
View File

@ -0,0 +1,226 @@
#include "../../include/capture/ximage.h"
#include "../../include/utils.h"
#include "../../include/cursor.h"
#include "../../include/color_conversion.h"
#include "../../include/window/window.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <X11/Xlib.h>
/* TODO: update when monitors are reconfigured */
/* Private state for the XGetImage-based monitor capture. */
typedef struct {
    gsr_capture_ximage_params params;
    Display *display;           // X11 display, obtained from the window backend at create time
    gsr_monitor monitor;        // the monitor being captured (looked up by name in start)
    vec2i capture_pos;          // top-left corner of the monitor in root-window coordinates
    vec2i capture_size;         // size of the captured area (monitor size, or region size if set)
    unsigned int texture_id;    // GL texture the grabbed screen image is uploaded into
    Window root_window;         // root window that XGetImage reads pixels from
} gsr_capture_ximage;
/* Delete the GL texture the screen contents are uploaded to, if it exists. */
static void gsr_capture_ximage_stop(gsr_capture_ximage *self) {
    if(self->texture_id == 0)
        return;
    self->params.egl->glDeleteTextures(1, &self->texture_id);
    self->texture_id = 0;
}
/* Return the larger of two ints. */
static int max_int(int a, int b) {
    return b > a ? b : a;
}
/* Start monitor capture: look up the monitor by name, compute the capture
 * region and the output video size, and allocate the texture that XGetImage
 * data is uploaded into. Returns 0 on success, -1 on failure. */
static int gsr_capture_ximage_start(gsr_capture *cap, gsr_capture_metadata *capture_metadata) {
    gsr_capture_ximage *self = cap->priv;
    self->root_window = DefaultRootWindow(self->display);

    if(!get_monitor_by_name(self->params.egl, GSR_CONNECTION_X11, self->params.display_to_capture, &self->monitor)) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_start: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
        gsr_capture_ximage_stop(self);
        return -1;
    }

    self->capture_pos = self->monitor.pos;
    self->capture_size = self->monitor.size;

    // An explicit region size overrides capturing the whole monitor
    if(self->params.region_size.x > 0 && self->params.region_size.y > 0)
        self->capture_size = self->params.region_size;

    // Output size priority: explicit output resolution (corrected to the capture
    // aspect ratio) > region size > full capture size
    if(self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0) {
        self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution);
        capture_metadata->video_size = self->params.output_resolution;
    } else if(self->params.region_size.x > 0 && self->params.region_size.y > 0) {
        capture_metadata->video_size = self->params.region_size;
    } else {
        capture_metadata->video_size = self->capture_size;
    }

    self->texture_id = gl_create_texture(self->params.egl, self->capture_size.x, self->capture_size.y, GL_RGB8, GL_RGB, GL_LINEAR);
    if(self->texture_id == 0) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_start: failed to create texture\n");
        gsr_capture_ximage_stop(self);
        return -1;
    }

    return 0;
}
/* Grab a rectangle of the root window with XGetImage, repack it into tightly
 * packed 24-bit RGB and upload it to self->texture_id. The rectangle is
 * clamped to the screen bounds first. Returns false on failure.
 *
 * NOTE(review): the per-channel shifts below assume a TrueColor visual with
 * masks 0xff0000/0xff00/0xff (the common 24/32-bit case) — confirm behavior
 * on other visuals. */
static bool gsr_capture_ximage_upload_to_texture(gsr_capture_ximage *self, int x, int y, int width, int height) {
    const int max_width = XWidthOfScreen(DefaultScreenOfDisplay(self->display));
    const int max_height = XHeightOfScreen(DefaultScreenOfDisplay(self->display));

    // Clamp the requested rectangle so XGetImage never reads outside the screen
    if(x < 0)
        x = 0;
    else if(x >= max_width)
        x = max_width - 1;

    if(y < 0)
        y = 0;
    else if(y >= max_height)
        y = max_height - 1;

    if(width < 0)
        width = 0;
    else if(x + width >= max_width)
        width = max_width - x;

    if(height < 0)
        height = 0;
    else if(y + height >= max_height)
        height = max_height - y;

    XImage *image = XGetImage(self->display, self->root_window, x, y, width, height, AllPlanes, ZPixmap);
    if(!image) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_upload_to_texture: XGetImage failed\n");
        return false;
    }

    bool success = false;
    // 3 bytes per pixel: tightly packed RGB for the GL_RGB upload below
    uint8_t *image_data = malloc(image->width * image->height * 3);
    if(!image_data) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_upload_to_texture: failed to allocate image data\n");
        goto done;
    }

    // Repack pixel-by-pixel; note that these loop variables shadow the
    // clamped x/y parameters above (the parameters are no longer needed here)
    for(int y = 0; y < image->height; ++y) {
        for(int x = 0; x < image->width; ++x) {
            unsigned long pixel = XGetPixel(image, x, y);
            unsigned char red = (pixel & image->red_mask) >> 16;
            unsigned char green = (pixel & image->green_mask) >> 8;
            unsigned char blue = pixel & image->blue_mask;

            const size_t texture_data_index = (x + y * image->width) * 3;
            image_data[texture_data_index + 0] = red;
            image_data[texture_data_index + 1] = green;
            image_data[texture_data_index + 2] = blue;
        }
    }

    self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_id);
    // TODO: Change to GL_RGBA for better performance? image_data needs alpha then as well
    self->params.egl->glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, image->width, image->height, GL_RGB, GL_UNSIGNED_BYTE, image_data);
    self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
    success = true;

    done:
    // free(NULL) is a no-op, so the allocation-failure path is safe here
    free(image_data);
    XDestroyImage(image);
    return success;
}
/* Grab the current screen contents and draw them (plus the cursor, when
 * enabled) into the color conversion target. The upload result is ignored:
 * if the grab fails, the texture's previous contents are drawn instead.
 * Always returns 0. */
static int gsr_capture_ximage_capture(gsr_capture *cap, gsr_capture_metadata *capture_metadata, gsr_color_conversion *color_conversion) {
    gsr_capture_ximage *self = cap->priv;
    const vec2i output_size = scale_keep_aspect_ratio(self->capture_size, capture_metadata->recording_size);
    const vec2i target_pos = gsr_capture_get_target_position(output_size, capture_metadata);

    gsr_capture_ximage_upload_to_texture(self, self->capture_pos.x + self->params.region_position.x, self->capture_pos.y + self->params.region_position.y, self->capture_size.x, self->capture_size.y);

    gsr_color_conversion_draw(color_conversion, self->texture_id,
        target_pos, output_size,
        (vec2i){0, 0}, self->capture_size, self->capture_size,
        GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);

    if(self->params.record_cursor && self->params.cursor->visible) {
        // Scale factor from captured pixels to output pixels (guard against 0-sized capture)
        const vec2d scale = {
            self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x,
            self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y
        };

        // Offset the cursor by its hotspot, scale into output space and shift by
        // the capture region origin
        const vec2i cursor_pos = {
            target_pos.x + (self->params.cursor->position.x - self->params.cursor->hotspot.x) * scale.x - self->capture_pos.x - self->params.region_position.x,
            target_pos.y + (self->params.cursor->position.y - self->params.cursor->hotspot.y) * scale.y - self->capture_pos.y - self->params.region_position.y
        };

        // Clip the cursor to the captured rectangle in the output
        self->params.egl->glEnable(GL_SCISSOR_TEST);
        self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y);

        gsr_color_conversion_draw(color_conversion, self->params.cursor->texture_id,
            cursor_pos, (vec2i){self->params.cursor->size.x * scale.x, self->params.cursor->size.y * scale.y},
            (vec2i){0, 0}, self->params.cursor->size, self->params.cursor->size,
            GSR_ROT_0, capture_metadata->flip, GSR_SOURCE_COLOR_RGB, false);

        self->params.egl->glDisable(GL_SCISSOR_TEST);
    }

    self->params.egl->glFlush();
    self->params.egl->glFinish();

    return 0;
}
/* Destroys the ximage capture: stops the capture, releases the duplicated display
 * string and the private data, then frees the capture object itself. */
static void gsr_capture_ximage_destroy(gsr_capture *cap) {
    gsr_capture_ximage *priv = cap->priv;
    if(priv) {
        gsr_capture_ximage_stop(priv);
        /* The display string was duplicated in create, so we own it. */
        free((void*)priv->params.display_to_capture);
        priv->params.display_to_capture = NULL;
        free(priv);
        cap->priv = NULL;
    }
    free(cap);
}
/* Creates an X11 ximage-based capture from |params|.
 * Returns a heap-allocated capture (freed with its destroy callback) or NULL on
 * failure (NULL/invalid params or allocation failure). */
gsr_capture* gsr_capture_ximage_create(const gsr_capture_ximage_params *params) {
    if(!params) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_create params is NULL\n");
        return NULL;
    }

    /* strdup(NULL) is undefined behavior, so validate the field before duplicating it below. */
    if(!params->display_to_capture) {
        fprintf(stderr, "gsr error: gsr_capture_ximage_create params.display_to_capture is NULL\n");
        return NULL;
    }

    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
    if(!cap)
        return NULL;

    gsr_capture_ximage *cap_ximage = calloc(1, sizeof(gsr_capture_ximage));
    if(!cap_ximage) {
        free(cap);
        return NULL;
    }

    /* Take a private copy so the caller's string lifetime doesn't matter; freed in destroy. */
    char *display_to_capture = strdup(params->display_to_capture);
    if(!display_to_capture) {
        free(cap);
        free(cap_ximage);
        return NULL;
    }

    cap_ximage->params = *params;
    cap_ximage->display = gsr_window_get_display(params->egl->window);
    cap_ximage->params.display_to_capture = display_to_capture;

    *cap = (gsr_capture) {
        .start = gsr_capture_ximage_start,
        .tick = NULL,
        .should_stop = NULL,
        .capture = gsr_capture_ximage_capture,
        .uses_external_image = NULL,
        .get_window_id = NULL,
        .destroy = gsr_capture_ximage_destroy,
        .priv = cap_ximage
    };
    return cap;
}

259
src/codec_query/nvenc.c Normal file
View File

@ -0,0 +1,259 @@
#include "../../include/codec_query/nvenc.h"
#include "../../include/cuda.h"
#include "../../external/nvEncodeAPI.h"
#include <dlfcn.h>
#include <stdio.h>
#include <string.h>
#define NVENCAPI_MAJOR_VERSION_470 11
#define NVENCAPI_MINOR_VERSION_470 1
#define NVENCAPI_VERSION_470 (NVENCAPI_MAJOR_VERSION_470 | (NVENCAPI_MINOR_VERSION_470 << 24))
#define NVENCAPI_STRUCT_VERSION_CUSTOM(nvenc_api_version, struct_version) ((uint32_t)(nvenc_api_version) | ((struct_version)<<16) | (0x7 << 28))
/* Loads the nvidia encode library, preferring the versioned soname.
 * Returns the dlopen handle, or NULL (with an error printed) if neither
 * libnvidia-encode.so.1 nor libnvidia-encode.so can be loaded. */
static void* open_nvenc_library(void) {
    dlerror(); /* clear any stale error state */
    void *lib = dlopen("libnvidia-encode.so.1", RTLD_LAZY);
    if(lib)
        return lib;
    lib = dlopen("libnvidia-encode.so", RTLD_LAZY);
    if(lib)
        return lib;
    fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc failed: failed to load libnvidia-encode.so/libnvidia-encode.so.1, error: %s\n", dlerror());
    return NULL;
}
/* Returns true if the nvenc profile GUID belongs to the h264 codec. */
static bool profile_is_h264(const GUID *profile_guid) {
    const GUID *h264_guids[] = {
        &NV_ENC_H264_PROFILE_BASELINE_GUID,
        &NV_ENC_H264_PROFILE_MAIN_GUID,
        &NV_ENC_H264_PROFILE_HIGH_GUID,
        &NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID,
        &NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID
    };
    /* Derive the count from the array so adding a GUID can't desync the loop bound. */
    for(int i = 0; i < (int)(sizeof(h264_guids) / sizeof(h264_guids[0])); ++i) {
        if(memcmp(profile_guid, h264_guids[i], sizeof(GUID)) == 0)
            return true;
    }
    return false;
}
/* Returns true if the nvenc profile GUID belongs to the (8-bit) hevc codec.
 * Fixed the copy-pasted local name (was "h264_guids") and the hardcoded loop bound. */
static bool profile_is_hevc(const GUID *profile_guid) {
    const GUID *hevc_guids[] = {
        &NV_ENC_HEVC_PROFILE_MAIN_GUID,
    };
    for(int i = 0; i < (int)(sizeof(hevc_guids) / sizeof(hevc_guids[0])); ++i) {
        if(memcmp(profile_guid, hevc_guids[i], sizeof(GUID)) == 0)
            return true;
    }
    return false;
}
/* Returns true if the nvenc profile GUID is the 10-bit hevc (Main10) profile.
 * Fixed the copy-pasted local name (was "h264_guids") and the hardcoded loop bound. */
static bool profile_is_hevc_10bit(const GUID *profile_guid) {
    const GUID *hevc_10bit_guids[] = {
        &NV_ENC_HEVC_PROFILE_MAIN10_GUID,
    };
    for(int i = 0; i < (int)(sizeof(hevc_10bit_guids) / sizeof(hevc_10bit_guids[0])); ++i) {
        if(memcmp(profile_guid, hevc_10bit_guids[i], sizeof(GUID)) == 0)
            return true;
    }
    return false;
}
/* Returns true if the nvenc profile GUID belongs to the av1 codec.
 * Fixed the copy-pasted local name (was "h264_guids") and the hardcoded loop bound. */
static bool profile_is_av1(const GUID *profile_guid) {
    const GUID *av1_guids[] = {
        &NV_ENC_AV1_PROFILE_MAIN_GUID,
    };
    for(int i = 0; i < (int)(sizeof(av1_guids) / sizeof(av1_guids[0])); ++i) {
        if(memcmp(profile_guid, av1_guids[i], sizeof(GUID)) == 0)
            return true;
    }
    return false;
}
/* Queries a single nvenc capability value for the codec identified by |encode_guid|.
 * Returns the capability value, or 0 on error (0 also means "not supported"). */
static int nvenc_get_encoding_capability(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, const GUID *encode_guid, uint32_t nvenc_api_version, NV_ENC_CAPS cap) {
    int result = 0;
    NV_ENC_CAPS_PARAM caps_param = {
        .version = NVENCAPI_STRUCT_VERSION_CUSTOM(nvenc_api_version, 1),
        .capsToQuery = cap
    };
    if(function_list->nvEncGetEncodeCaps(nvenc_encoder, *encode_guid, &caps_param, &result) != NV_ENC_SUCCESS)
        result = 0;
    return result;
}
static vec2i encoder_get_max_resolution(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, const GUID *encode_guid, uint32_t nvenc_api_version) {
return (vec2i){
.x = nvenc_get_encoding_capability(function_list, nvenc_encoder, encode_guid, nvenc_api_version, NV_ENC_CAPS_WIDTH_MAX),
.y = nvenc_get_encoding_capability(function_list, nvenc_encoder, encode_guid, nvenc_api_version, NV_ENC_CAPS_HEIGHT_MAX),
};
}
/* Queries the profiles supported by |encoder_guid| and marks the matching codecs in
 * |supported_video_codecs| as supported, along with the encoder's maximum resolution.
 * Returns false if the profile list can't be queried or allocated. */
static bool encoder_get_supported_profiles(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, const GUID *encoder_guid, gsr_supported_video_codecs *supported_video_codecs, uint32_t nvenc_api_version) {
    bool success = false;
    GUID *profile_guids = NULL;

    uint32_t profile_guid_count = 0;
    if(function_list->nvEncGetEncodeProfileGUIDCount(nvenc_encoder, *encoder_guid, &profile_guid_count) != NV_ENC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
        goto fail;
    }

    if(profile_guid_count == 0)
        goto fail;

    profile_guids = calloc(profile_guid_count, sizeof(GUID));
    if(!profile_guids) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)profile_guid_count);
        goto fail;
    }

    if(function_list->nvEncGetEncodeProfileGUIDs(nvenc_encoder, *encoder_guid, profile_guids, profile_guid_count, &profile_guid_count) != NV_ENC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
        goto fail;
    }

    const vec2i max_resolution = encoder_get_max_resolution(function_list, nvenc_encoder, encoder_guid, nvenc_api_version);
    for(uint32_t i = 0; i < profile_guid_count; ++i) {
        /* hevc Main10 covers both hdr and 10-bit; av1 main covers av1, hdr and 10-bit. */
        if(profile_is_h264(&profile_guids[i])) {
            supported_video_codecs->h264 = (gsr_supported_video_codec){ true, false, max_resolution };
        } else if(profile_is_hevc(&profile_guids[i])) {
            supported_video_codecs->hevc = (gsr_supported_video_codec){ true, false, max_resolution };
        } else if(profile_is_hevc_10bit(&profile_guids[i])) {
            supported_video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, false, max_resolution };
            supported_video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, false, max_resolution };
        } else if(profile_is_av1(&profile_guids[i])) {
            supported_video_codecs->av1 = (gsr_supported_video_codec){ true, false, max_resolution };
            supported_video_codecs->av1_hdr = (gsr_supported_video_codec){ true, false, max_resolution };
            supported_video_codecs->av1_10bit = (gsr_supported_video_codec){ true, false, max_resolution };
        }
    }

    success = true;
    fail:
    /* free(NULL) is a no-op, no guard needed. */
    free(profile_guids);
    return success;
}
/* Enumerates all encode codec GUIDs of the opened nvenc session and fills
 * |supported_video_codecs| with the union of their supported profiles.
 * Returns false if the codec GUID list can't be queried or allocated. */
static bool get_supported_video_codecs(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, gsr_supported_video_codecs *supported_video_codecs, uint32_t nvenc_api_version) {
    bool success = false;
    GUID *encoder_guids = NULL;
    *supported_video_codecs = (gsr_supported_video_codecs){0};

    uint32_t encode_guid_count = 0;
    if(function_list->nvEncGetEncodeGUIDCount(nvenc_encoder, &encode_guid_count) != NV_ENC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
        goto fail;
    }

    if(encode_guid_count == 0)
        goto fail;

    encoder_guids = calloc(encode_guid_count, sizeof(GUID));
    if(!encoder_guids) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)encode_guid_count);
        goto fail;
    }

    if(function_list->nvEncGetEncodeGUIDs(nvenc_encoder, encoder_guids, encode_guid_count, &encode_guid_count) != NV_ENC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
        goto fail;
    }

    /* Per-codec failures are ignored on purpose; a codec simply stays unsupported. */
    for(uint32_t i = 0; i < encode_guid_count; ++i) {
        encoder_get_supported_profiles(function_list, nvenc_encoder, &encoder_guids[i], supported_video_codecs, nvenc_api_version);
    }

    success = true;
    fail:
    /* free(NULL) is a no-op, no guard needed. */
    free(encoder_guids);
    return success;
}
/* Queries which video codecs the nvidia gpu supports via nvenc and stores the result in
 * |video_codecs|. Returns false on failure (cuda or nvenc unavailable). If |cleanup| is
 * false, the cuda context, nvenc session and library handle are deliberately kept open.
 * NOTE(review): presumably to skip slow driver teardown before process exit — confirm
 * with callers. */
bool gsr_get_supported_video_codecs_nvenc(gsr_supported_video_codecs *video_codecs, bool cleanup) {
    memset(video_codecs, 0, sizeof(*video_codecs));

    bool success = false;
    void *nvenc_lib = NULL;
    void *nvenc_encoder = NULL;

    /* nvenc needs a cuda context as the device for the encode session. */
    gsr_cuda cuda;
    memset(&cuda, 0, sizeof(cuda));
    if(!gsr_cuda_load(&cuda, NULL, false)) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to load cuda\n");
        goto done;
    }

    nvenc_lib = open_nvenc_library();
    if(!nvenc_lib)
        goto done;

    typedef NVENCSTATUS NVENCAPI (*FUNC_NvEncodeAPICreateInstance)(NV_ENCODE_API_FUNCTION_LIST *functionList);
    FUNC_NvEncodeAPICreateInstance nvEncodeAPICreateInstance = (FUNC_NvEncodeAPICreateInstance)dlsym(nvenc_lib, "NvEncodeAPICreateInstance");
    if(!nvEncodeAPICreateInstance) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to find NvEncodeAPICreateInstance in libnvidia-encode.so\n");
        goto done;
    }

    /* First try the nvenc api version this binary was built against. */
    NV_ENCODE_API_FUNCTION_LIST function_list;
    memset(&function_list, 0, sizeof(function_list));
    function_list.version = NVENCAPI_STRUCT_VERSION(2);
    if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance failed\n");
        goto done;
    }

    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params;
    memset(&params, 0, sizeof(params));
    params.version = NVENCAPI_STRUCT_VERSION(1);
    params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
    params.device = cuda.cu_ctx;
    params.apiVersion = NVENCAPI_VERSION;
    if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
        // Old nvidia gpus dont support the new nvenc api (which is required for av1).
        // In such cases fallback to old api version if possible and try again.
        function_list.version = NVENCAPI_STRUCT_VERSION_CUSTOM(NVENCAPI_VERSION_470, 2);
        if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
            fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance (retry) failed\n");
            goto done;
        }

        params.version = NVENCAPI_STRUCT_VERSION_CUSTOM(NVENCAPI_VERSION_470, 1);
        params.apiVersion = NVENCAPI_VERSION_470;
        if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
            fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncOpenEncodeSessionEx (retry) failed\n");
            goto done;
        }
    }

    /* params.apiVersion reflects whichever api version actually opened the session. */
    success = get_supported_video_codecs(&function_list, nvenc_encoder, video_codecs, params.apiVersion);

    done:
    if(cleanup) {
        /* function_list is only read here when nvenc_encoder was opened, which implies
           it was successfully initialized above. */
        if(nvenc_encoder)
            function_list.nvEncDestroyEncoder(nvenc_encoder);
        if(nvenc_lib)
            dlclose(nvenc_lib);
        gsr_cuda_unload(&cuda);
    }
    return success;
}

236
src/codec_query/vaapi.c Normal file
View File

@ -0,0 +1,236 @@
#include "../../include/codec_query/vaapi.h"
#include "../../include/utils.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <va/va.h>
#include <va/va_drm.h>
/* Returns true if the va profile is one of the h264 encoding profiles.
 * 5 is VAProfileH264Baseline, which was removed from modern libva headers. */
static bool profile_is_h264(VAProfile profile) {
    if(profile == (VAProfile)5)
        return true;
    return profile == VAProfileH264Main
        || profile == VAProfileH264High
        || profile == VAProfileH264ConstrainedBaseline;
}
/* Returns true if the va profile is the 8-bit hevc main profile. */
static bool profile_is_hevc_8bit(VAProfile profile) {
    return profile == VAProfileHEVCMain;
}
/* Returns true if the va profile is the 10-bit hevc main profile.
 * The other high bit-depth / chroma hevc profiles (Main12, Main422_10, Main422_12,
 * Main444, Main444_10, Main444_12) are deliberately not matched. */
static bool profile_is_hevc_10bit(VAProfile profile) {
    return profile == VAProfileHEVCMain10;
}
/* Returns true if the va profile is an av1 encoding profile. */
static bool profile_is_av1(VAProfile profile) {
    return profile == VAProfileAV1Profile0 || profile == VAProfileAV1Profile1;
}
/* Returns true if the va profile is the vp8 encoding profile. */
static bool profile_is_vp8(VAProfile profile) {
    return profile == VAProfileVP8Version0_3;
}
/* Returns true if the va profile is one of the vp9 encoding profiles. */
static bool profile_is_vp9(VAProfile profile) {
    return profile == VAProfileVP9Profile0
        || profile == VAProfileVP9Profile1
        || profile == VAProfileVP9Profile2
        || profile == VAProfileVP9Profile3;
}
/* Returns the maximum picture size supported by the profile+entrypoint pair,
 * or {0, 0} on error / if the driver doesn't report the attributes. */
static vec2i profile_entrypoint_get_max_resolution(VADisplay va_dpy, VAProfile profile, VAEntrypoint entrypoint) {
    VAConfigAttrib attribs[2] = {
        { .type = VAConfigAttribMaxPictureWidth },
        { .type = VAConfigAttribMaxPictureHeight }
    };
    if(vaGetConfigAttributes(va_dpy, profile, entrypoint, attribs, 2) != VA_STATUS_SUCCESS)
        return (vec2i){0, 0};
    if((attribs[0].value & VA_ATTRIB_NOT_SUPPORTED) || (attribs[1].value & VA_ATTRIB_NOT_SUPPORTED))
        return (vec2i){0, 0};
    return (vec2i){ attribs[0].value, attribs[1].value };
}
/* Returns the video encoding entrypoint for |profile|, preferring the regular encoding
 * entrypoint (EncSlice) over the low-power one (EncSliceLP).
 * Returns 0 on error or if the profile supports neither. */
static VAEntrypoint profile_get_video_encoding_entrypoint(VADisplay va_dpy, VAProfile profile) {
    int num_entrypoints = vaMaxNumEntrypoints(va_dpy);
    if(num_entrypoints <= 0)
        return 0;

    VAEntrypoint *entrypoint_list = calloc(num_entrypoints, sizeof(VAEntrypoint));
    if(!entrypoint_list)
        return 0;

    VAEntrypoint regular_entrypoint = 0;
    VAEntrypoint low_power_entrypoint = 0;
    if(vaQueryConfigEntrypoints(va_dpy, profile, entrypoint_list, &num_entrypoints) == VA_STATUS_SUCCESS) {
        for(int i = 0; i < num_entrypoints; ++i) {
            if(entrypoint_list[i] == VAEntrypointEncSlice)
                regular_entrypoint = VAEntrypointEncSlice;
            else if(entrypoint_list[i] == VAEntrypointEncSliceLP)
                low_power_entrypoint = VAEntrypointEncSliceLP;
        }
    }
    free(entrypoint_list);

    return regular_entrypoint != 0 ? regular_entrypoint : low_power_entrypoint;
}
static bool get_supported_video_codecs(VADisplay va_dpy, gsr_supported_video_codecs *video_codecs, bool cleanup) {
*video_codecs = (gsr_supported_video_codecs){0};
bool success = false;
VAProfile *profile_list = NULL;
vaSetInfoCallback(va_dpy, NULL, NULL);
int va_major = 0;
int va_minor = 0;
if(vaInitialize(va_dpy, &va_major, &va_minor) != VA_STATUS_SUCCESS) {
fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: vaInitialize failed\n");
return false;
}
int num_profiles = vaMaxNumProfiles(va_dpy);
if(num_profiles <= 0)
goto fail;
profile_list = calloc(num_profiles, sizeof(VAProfile));
if(!profile_list || vaQueryConfigProfiles(va_dpy, profile_list, &num_profiles) != VA_STATUS_SUCCESS)
goto fail;
for(int i = 0; i < num_profiles; ++i) {
if(profile_is_h264(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->h264 = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
} else if(profile_is_hevc_8bit(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->hevc = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
} else if(profile_is_hevc_10bit(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
} else if(profile_is_av1(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->av1 = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
video_codecs->av1_hdr = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
video_codecs->av1_10bit = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
} else if(profile_is_vp8(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->vp8 = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
} else if(profile_is_vp9(profile_list[i])) {
const VAEntrypoint encoding_entrypoint = profile_get_video_encoding_entrypoint(va_dpy, profile_list[i]);
if(encoding_entrypoint != 0) {
const vec2i max_resolution = profile_entrypoint_get_max_resolution(va_dpy, profile_list[i], encoding_entrypoint);
video_codecs->vp9 = (gsr_supported_video_codec){ true, encoding_entrypoint == VAEntrypointEncSliceLP, max_resolution };
}
}
}
success = true;
fail:
if(profile_list)
free(profile_list);
if(cleanup)
vaTerminate(va_dpy);
return success;
}
/* Queries the video codecs the gpu behind |card_path| (/dev/dri/cardX) supports for
 * encoding with vaapi and stores the result in |video_codecs|.
 * Returns false on failure. If |cleanup| is false the render node fd (and the va
 * display, see get_supported_video_codecs) are deliberately kept open. */
bool gsr_get_supported_video_codecs_vaapi(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
    memset(video_codecs, 0, sizeof(*video_codecs));

    bool success = false;
    int drm_fd = -1;

    char render_path[128];
    if(!gsr_card_path_get_render_path(card_path, render_path)) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
        goto done;
    }

    drm_fd = open(render_path, O_RDWR);
    if(drm_fd == -1) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to open device %s\n", render_path);
        goto done;
    }

    VADisplay va_dpy = vaGetDisplayDRM(drm_fd);
    if(va_dpy) {
        if(!get_supported_video_codecs(va_dpy, video_codecs, cleanup)) {
            fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to query supported video codecs for device %s\n", render_path);
            goto done;
        }
        success = true;
    }

    done:
    if(cleanup) {
        /* Bug fix: was "drm_fd > 0", which would leak a valid fd 0.
           The fd is initialized to -1, so -1 is the only "not opened" value. */
        if(drm_fd != -1)
            close(drm_fd);
    }
    return success;
}

156
src/codec_query/vulkan.c Normal file
View File

@ -0,0 +1,156 @@
#include "../../include/codec_query/vulkan.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <xf86drm.h>
#define VK_NO_PROTOTYPES
//#include <vulkan/vulkan.h>
#define MAX_PHYSICAL_DEVICES 32
/* Device extensions a gpu must expose for vulkan video encoding to be usable. */
static const char *required_device_extensions[] = {
    "VK_KHR_external_memory_fd",
    "VK_KHR_external_semaphore_fd",
    "VK_KHR_video_encode_queue",
    "VK_KHR_video_queue",
    "VK_KHR_video_maintenance1",
    "VK_EXT_external_memory_dma_buf",
    "VK_EXT_external_memory_host",
    "VK_EXT_image_drm_format_modifier"
};
/* Derived from the array so the count can never get out of sync (was hardcoded to 8). */
static int num_required_device_extensions = sizeof(required_device_extensions) / sizeof(required_device_extensions[0]);
/* Queries which video codecs could be used for vulkan video encoding on the gpu behind
 * |card_path|. The real vulkan query below is compiled out (#if 0); the active code
 * simply reports h264 and hevc as supported without touching the device.
 * NOTE(review): |card_path| and |cleanup| are unused on the active path. */
bool gsr_get_supported_video_codecs_vulkan(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
    memset(video_codecs, 0, sizeof(*video_codecs));
#if 0
    /* Disabled: full device enumeration matching |card_path| via DRM properties,
       followed by checking the device's VK_KHR_video_encode_* extensions. */
    bool success = false;
    VkInstance instance = NULL;
    VkPhysicalDevice physical_devices[MAX_PHYSICAL_DEVICES];
    VkDevice device = NULL;
    VkExtensionProperties *device_extensions = NULL;

    const VkApplicationInfo app_info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pApplicationName = "GPU Screen Recorder",
        .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
        .pEngineName = "GPU Screen Recorder",
        .engineVersion = VK_MAKE_VERSION(1, 0, 0),
        .apiVersion = VK_API_VERSION_1_3,
    };

    const VkInstanceCreateInfo instance_create_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pApplicationInfo = &app_info
    };

    if(vkCreateInstance(&instance_create_info, NULL, &instance) != VK_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateInstance failed\n");
        goto done;
    }

    uint32_t num_devices = 0;
    if(vkEnumeratePhysicalDevices(instance, &num_devices, NULL) != VK_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (query num devices) failed\n");
        goto done;
    }

    if(num_devices == 0) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: no vulkan capable device found\n");
        goto done;
    }

    if(num_devices > MAX_PHYSICAL_DEVICES)
        num_devices = MAX_PHYSICAL_DEVICES;

    if(vkEnumeratePhysicalDevices(instance, &num_devices, physical_devices) != VK_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (get data) failed\n");
        goto done;
    }

    /* Match the vulkan physical device to the opengl device by its DRM primary node. */
    VkPhysicalDevice physical_device = NULL;
    char device_card_path[128];
    for(uint32_t i = 0; i < num_devices; ++i) {
        VkPhysicalDeviceDrmPropertiesEXT device_drm_properties = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT
        };

        VkPhysicalDeviceProperties2 device_properties = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &device_drm_properties
        };
        vkGetPhysicalDeviceProperties2(physical_devices[i], &device_properties);

        if(!device_drm_properties.hasPrimary)
            continue;

        snprintf(device_card_path, sizeof(device_card_path), DRM_DEV_NAME, DRM_DIR_NAME, (int)device_drm_properties.primaryMinor);
        if(strcmp(device_card_path, card_path) == 0) {
            physical_device = physical_devices[i];
            break;
        }
    }

    if(!physical_device) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to find a vulkan device that matches opengl device %s\n", card_path);
        goto done;
    }

    /* Creating a device with the required extensions doubles as a support check. */
    const VkDeviceCreateInfo device_create_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .enabledExtensionCount = num_required_device_extensions,
        .ppEnabledExtensionNames = required_device_extensions
    };

    if(vkCreateDevice(physical_device, &device_create_info, NULL, &device) != VK_SUCCESS) {
        //fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateDevice failed. Device %s likely doesn't support vulkan video encoding\n", card_path);
        goto done;
    }

    uint32_t num_device_extensions = 0;
    if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, NULL) != VK_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (query num device extensions) failed\n");
        goto done;
    }

    device_extensions = calloc(num_device_extensions, sizeof(VkExtensionProperties));
    if(!device_extensions) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to allocate %d device extensions\n", num_device_extensions);
        goto done;
    }

    if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, device_extensions) != VK_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (get data) failed\n");
        goto done;
    }

    for(uint32_t i = 0; i < num_device_extensions; ++i) {
        if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h264") == 0) {
            video_codecs->h264 = true;
        } else if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h265") == 0) {
            // TODO: Verify if 10bit and hdr are actually supported
            video_codecs->hevc = true;
            video_codecs->hevc_10bit = true;
            video_codecs->hevc_hdr = true;
        }
    }

    success = true;

    done:
    if(cleanup) {
        if(device)
            vkDestroyDevice(device, NULL);
        if(instance)
            vkDestroyInstance(instance, NULL);
    }
    if(device_extensions)
        free(device_extensions);
    return success;
#else
    // TODO: Low power query
    /* Active path: assume h264 and hevc are encodable; remaining struct fields
       (max resolution) stay zero-initialized from the memset above. */
    video_codecs->h264 = (gsr_supported_video_codec){ true, false };
    video_codecs->hevc = (gsr_supported_video_codec){ true, false };
    return true;
#endif
}

948
src/color_conversion.c Normal file
View File

@ -0,0 +1,948 @@
#include "../include/color_conversion.h"
#include "../include/egl.h"
#include <stdio.h>
#include <string.h>
#include <assert.h>
#define GRAPHICS_SHADER_INDEX_Y 0
#define GRAPHICS_SHADER_INDEX_UV 1
#define GRAPHICS_SHADER_INDEX_Y_EXTERNAL 2
#define GRAPHICS_SHADER_INDEX_UV_EXTERNAL 3
#define GRAPHICS_SHADER_INDEX_RGB 4
#define GRAPHICS_SHADER_INDEX_RGB_EXTERNAL 5
#define GRAPHICS_SHADER_INDEX_YUYV_TO_Y 6
#define GRAPHICS_SHADER_INDEX_YUYV_TO_UV 7
#define GRAPHICS_SHADER_INDEX_YUYV_TO_Y_EXTERNAL 8
#define GRAPHICS_SHADER_INDEX_YUYV_TO_UV_EXTERNAL 9
#define GRAPHICS_SHADER_INDEX_YUYV_TO_RGB 10
#define GRAPHICS_SHADER_INDEX_YUYV_TO_RGB_EXTERNAL 11
/* https://en.wikipedia.org/wiki/YCbCr, see study/color_space_transform_matrix.png */
/* ITU-R BT2020, full */
/* https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.2020-2-201510-I!!PDF-E.pdf */
#define RGB_TO_P010_FULL "const mat4 RGBtoYUV = mat4(0.262700, -0.139630, 0.500000, 0.000000,\n" \
" 0.678000, -0.360370, -0.459786, 0.000000,\n" \
" 0.059300, 0.500000, -0.040214, 0.000000,\n" \
" 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT2020, limited (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_P010_LIMITED "const mat4 RGBtoYUV = mat4(0.225613, -0.119918, 0.429412, 0.000000,\n" \
" 0.582282, -0.309494, -0.394875, 0.000000,\n" \
" 0.050928, 0.429412, -0.034537, 0.000000,\n" \
" 0.062745, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, full, custom values: 0.2110 0.7110 0.0710 */
/* https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.709-6-201506-I!!PDF-E.pdf */
#define RGB_TO_NV12_FULL "const mat4 RGBtoYUV = mat4(0.211000, -0.113563, 0.500000, 0.000000,\n" \
" 0.711000, -0.382670, -0.450570, 0.000000,\n" \
" 0.071000, 0.500000, -0.044994, 0.000000,\n" \
" 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, limited, custom values: 0.2100 0.7100 0.0700 (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_NV12_LIMITED "const mat4 RGBtoYUV = mat4(0.180353, -0.096964, 0.429412, 0.000000,\n" \
" 0.609765, -0.327830, -0.385927, 0.000000,\n" \
" 0.060118, 0.429412, -0.038049, 0.000000,\n" \
" 0.062745, 0.500000, 0.500000, 1.000000);\n"
/* Returns the GLSL RGB->YUV transform matrix constant for the destination format and
 * color range, an empty string for RGB8 (no conversion needed), or NULL for any
 * unknown format/range combination. */
static const char* color_format_range_get_transform_matrix(gsr_destination_color color_format, gsr_color_range color_range) {
    switch(color_format) {
        case GSR_DESTINATION_COLOR_NV12:
            switch(color_range) {
                case GSR_COLOR_RANGE_LIMITED: return RGB_TO_NV12_LIMITED;
                case GSR_COLOR_RANGE_FULL:    return RGB_TO_NV12_FULL;
            }
            return NULL;
        case GSR_DESTINATION_COLOR_P010:
            switch(color_range) {
                case GSR_COLOR_RANGE_LIMITED: return RGB_TO_P010_LIMITED;
                case GSR_COLOR_RANGE_FULL:    return RGB_TO_P010_FULL;
            }
            return NULL;
        case GSR_DESTINATION_COLOR_RGB8:
            return "";
        default:
            return NULL;
    }
    return NULL;
}
/* Compiles and links the graphics shader that writes the Y (luma) plane: samples the
 * input texture (optionally an external/EGLImage texture), applies the RGB->YUV matrix
 * for |color_format|/|color_range| and outputs only the Y component (alpha passthrough).
 * Fills |uniforms| with the shader's uniform locations.
 * Returns 0 on success, -1 on failure. */
static int load_graphics_shader_y(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, gsr_destination_color color_format, gsr_color_range color_range, bool external_texture) {
    const char *color_transform_matrix = color_format_range_get_transform_matrix(color_format, color_range);

    /* Vertex shader: rotates texcoords around the texture center and offsets the quad. */
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        " gl_Position = vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0); \n"
        "} \n");

    /* Fragment: only the Y component of the converted color is written to .x. */
    const char *main_code =
        " vec4 pixel = texture(tex1, texcoords_out); \n"
        " FragColor.x = (RGBtoYUV * vec4(pixel.rgb, 1.0)).x; \n"
        " FragColor.w = pixel.a; \n";

    /* External textures (samplerExternalOES) need the OES EGL image extensions. */
    char fragment_shader[2048];
    if(external_texture) {
        snprintf(fragment_shader, sizeof(fragment_shader),
            "#version 300 es \n"
            "#extension GL_OES_EGL_image_external : enable \n"
            "#extension GL_OES_EGL_image_external_essl3 : require \n"
            "precision highp float; \n"
            "in vec2 texcoords_out; \n"
            "uniform samplerExternalOES tex1; \n"
            "out vec4 FragColor; \n"
            "%s"
            "void main() \n"
            "{ \n"
            "%s"
            "} \n", color_transform_matrix, main_code);
    } else {
        snprintf(fragment_shader, sizeof(fragment_shader),
            "#version 300 es \n"
            "precision highp float; \n"
            "in vec2 texcoords_out; \n"
            "uniform sampler2D tex1; \n"
            "out vec4 FragColor; \n"
            "%s"
            "void main() \n"
            "{ \n"
            "%s"
            "} \n", color_transform_matrix, main_code);
    }

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Compiles and links the graphics shader that writes the interleaved UV (chroma) plane:
 * samples the input texture, applies the RGB->YUV matrix for |color_format|/|color_range|
 * and outputs the U and V components; the vertex shader maps the quad to the half-size
 * chroma plane. Fills |uniforms| with the shader's uniform locations.
 * Returns 0 on success, -1 on failure.
 * Bug fix: return type was "unsigned int" while the error path returns -1; changed to
 * int to match load_graphics_shader_y. */
static int load_graphics_shader_uv(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, gsr_destination_color color_format, gsr_color_range color_range, bool external_texture) {
    const char *color_transform_matrix = color_format_range_get_transform_matrix(color_format, color_range);

    /* The gl_Position transform scales/offsets the quad onto the quarter-size UV plane. */
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        " gl_Position = (vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0)) * vec4(0.5, 0.5, 1.0, 1.0) - vec4(0.5, 0.5, 0.0, 0.0); \n"
        "} \n");

    /* Fragment: the U and V components of the converted color go to .xy. */
    const char *main_code =
        " vec4 pixel = texture(tex1, texcoords_out); \n"
        " FragColor.xy = (RGBtoYUV * vec4(pixel.rgb, 1.0)).yz; \n"
        " FragColor.w = pixel.a; \n";

    char fragment_shader[2048];
    if(external_texture) {
        snprintf(fragment_shader, sizeof(fragment_shader),
            "#version 300 es \n"
            "#extension GL_OES_EGL_image_external : enable \n"
            "#extension GL_OES_EGL_image_external_essl3 : require \n"
            "precision highp float; \n"
            "in vec2 texcoords_out; \n"
            "uniform samplerExternalOES tex1; \n"
            "out vec4 FragColor; \n"
            "%s"
            "void main() \n"
            "{ \n"
            "%s"
            "} \n", color_transform_matrix, main_code);
    } else {
        snprintf(fragment_shader, sizeof(fragment_shader),
            "#version 300 es \n"
            "precision highp float; \n"
            "in vec2 texcoords_out; \n"
            "uniform sampler2D tex1; \n"
            "out vec4 FragColor; \n"
            "%s"
            "void main() \n"
            "{ \n"
            "%s"
            "} \n", color_transform_matrix, main_code);
    }

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Builds the graphics shader pair that copies source pixels unchanged to an RGB
 * destination texture (used for the GSR_DESTINATION_COLOR_RGB8 path).
 * On success, fills uniforms with the "offset" and "rotation_matrix" locations.
 * Returns 0 on success, -1 on failure.
 * Fix: return type changed from `unsigned int` to `int` — the error path
 * returns -1, which silently converted to a huge unsigned value. Callers only
 * compare against 0, so this is backward-compatible. */
static int load_graphics_shader_rgb(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, bool external_texture) {
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        " gl_Position = vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0); \n"
        "} \n");

    /* Straight copy, no color-space conversion. */
    const char *main_code =
        " vec4 pixel = texture(tex1, texcoords_out); \n"
        " FragColor = pixel; \n";

    /* Only the extension directives and the sampler type differ between the
       external (OES image) and regular texture variants. */
    const char *extension_directives = external_texture ?
        "#extension GL_OES_EGL_image_external : enable \n"
        "#extension GL_OES_EGL_image_external_essl3 : require \n" : "";
    const char *sampler_type = external_texture ? "samplerExternalOES" : "sampler2D";

    char fragment_shader[2048];
    snprintf(fragment_shader, sizeof(fragment_shader),
        "#version 300 es \n"
        "%s"
        "precision highp float; \n"
        "in vec2 texcoords_out; \n"
        "uniform %s tex1; \n"
        "out vec4 FragColor; \n"
        "void main() \n"
        "{ \n"
        "%s"
        "} \n", extension_directives, sampler_type, main_code);

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Builds the graphics shader pair that extracts the Y (luma) channel from a
   YUYV-packed source texture. The Y value of each packed texel is in the red
   component, so the fragment shader only forwards pixel.r.
   Fills uniforms with the "offset" and "rotation_matrix" locations.
   Returns 0 on success, -1 on failure. */
static int load_graphics_shader_yuyv_to_y(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, bool external_texture) {
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        " gl_Position = vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0); \n"
        "} \n");

    const char *fragment_main =
        " vec4 pixel = texture(tex1, texcoords_out); \n"
        " FragColor.x = pixel.r; \n"
        " FragColor.w = 1.0; \n";

    /* The external (OES image) variant differs only in the extension
       directives and the sampler type, so a single format string covers both. */
    const char *extension_directives = external_texture ?
        "#extension GL_OES_EGL_image_external : enable \n"
        "#extension GL_OES_EGL_image_external_essl3 : require \n" : "";
    const char *sampler_type = external_texture ? "samplerExternalOES" : "sampler2D";

    char fragment_shader[2048];
    snprintf(fragment_shader, sizeof(fragment_shader),
        "#version 300 es \n"
        "%s"
        "precision highp float; \n"
        "in vec2 texcoords_out; \n"
        "uniform %s tex1; \n"
        "out vec4 FragColor; \n"
        "void main() \n"
        "{ \n"
        "%s"
        "} \n", extension_directives, sampler_type, fragment_main);

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Builds the graphics shader pair that extracts interleaved U/V (chroma) values
 * from a YUYV-packed source into the half-resolution UV plane of a NV12/P010
 * destination. In YUYV, consecutive texels alternate between carrying U and V
 * in their green component, so the fragment shader fetches the neighbor texel
 * to reassemble the (U, V) pair.
 * Fills uniforms with the "offset" and "rotation_matrix" locations.
 * Returns 0 on success, -1 on failure.
 * Fix: return type changed from `unsigned int` to `int` — the error path
 * returns -1, which silently converted to a huge unsigned value. Callers only
 * compare against 0, so this is backward-compatible. */
static int load_graphics_shader_yuyv_to_uv(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, bool external_texture) {
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        /* Scale + bias by 0.5: the UV plane is half the destination resolution. */
        " gl_Position = (vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0)) * vec4(0.5, 0.5, 1.0, 1.0) - vec4(0.5, 0.5, 0.0, 0.0); \n"
        "} \n");

    const char *main_code =
        " vec2 resolution = vec2(textureSize(tex1, 0));\n"
        " ivec2 uv = ivec2(texcoords_out * resolution);\n"
        " float u = 0.0;\n"
        " float v = 0.0;\n"
        " vec4 this_color = texelFetch(tex1, uv, 0);\n"
        " if((uv.x & 1) == 0) {\n"
        "  vec2 next_color = texelFetch(tex1, uv + ivec2(1, 0), 0).rg;\n"
        "  u = this_color.g;\n"
        "  v = next_color.g;\n"
        " } else {\n"
        "  vec2 prev_color = texelFetch(tex1, uv - ivec2(1, 0), 0).rg;\n"
        "  u = prev_color.g;\n"
        "  v = this_color.g;\n"
        " }\n"
        " FragColor.rg = vec2(u, v);\n"
        " FragColor.w = 1.0;\n";

    /* Only the extension directives and the sampler type differ between the
       external (OES image) and regular texture variants. */
    const char *extension_directives = external_texture ?
        "#extension GL_OES_EGL_image_external : enable \n"
        "#extension GL_OES_EGL_image_external_essl3 : require \n" : "";
    const char *sampler_type = external_texture ? "samplerExternalOES" : "sampler2D";

    char fragment_shader[2048];
    snprintf(fragment_shader, sizeof(fragment_shader),
        "#version 300 es \n"
        "%s"
        "precision highp float; \n"
        "in vec2 texcoords_out; \n"
        "uniform %s tex1; \n"
        "out vec4 FragColor; \n"
        "void main() \n"
        "{ \n"
        "%s"
        "} \n", extension_directives, sampler_type, main_code);

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Builds the graphics shader pair that converts a YUYV-packed source directly
 * to RGB. Reassembles the (Y, U, V) triple from the packed texel pair (the
 * chroma values alternate between neighboring texels' green components), then
 * applies a fixed YUV -> RGB conversion with hard-coded coefficients.
 * Fills uniforms with the "offset" and "rotation_matrix" locations.
 * Returns 0 on success, -1 on failure.
 * Fix: return type changed from `unsigned int` to `int` — the error path
 * returns -1, which silently converted to a huge unsigned value. Callers only
 * compare against 0, so this is backward-compatible. */
static int load_graphics_shader_yuyv_to_rgb(gsr_shader *shader, gsr_egl *egl, gsr_color_graphics_uniforms *uniforms, bool external_texture) {
    char vertex_shader[2048];
    snprintf(vertex_shader, sizeof(vertex_shader),
        "#version 300 es \n"
        "in vec2 pos; \n"
        "in vec2 texcoords; \n"
        "out vec2 texcoords_out; \n"
        "uniform vec2 offset; \n"
        "uniform float rotation; \n"
        "uniform mat2 rotation_matrix; \n"
        "void main() \n"
        "{ \n"
        " texcoords_out = vec2(texcoords.x - 0.5, texcoords.y - 0.5) * rotation_matrix + vec2(0.5, 0.5); \n"
        " gl_Position = vec4(offset.x, offset.y, 0.0, 0.0) + vec4(pos.x, pos.y, 0.0, 1.0); \n"
        "} \n");

    const char *main_code =
        " vec2 resolution = vec2(textureSize(tex1, 0));\n"
        " ivec2 uv = ivec2(texcoords_out * resolution);\n"
        " float y = 0.0;\n"
        " float u = 0.0;\n"
        " float v = 0.0;\n"
        " vec4 this_color = texelFetch(tex1, uv, 0);\n"
        " if((uv.x & 1) == 0) {\n"
        "  vec2 next_color = texelFetch(tex1, uv + ivec2(1, 0), 0).rg;\n"
        "  y = this_color.r;\n"
        "  u = this_color.g;\n"
        "  v = next_color.g;\n"
        " } else {\n"
        "  vec2 prev_color = texelFetch(tex1, uv - ivec2(1, 0), 0).rg;\n"
        "  y = this_color.r;\n"
        "  u = prev_color.g;\n"
        "  v = this_color.g;\n"
        " }\n"
        " FragColor = vec4(\n"
        "  y + 1.4065 * (v - 0.5),\n"
        "  y - 0.3455 * (u - 0.5) - 0.7169 * (v - 0.5),\n"
        "  y + 1.1790 * (u - 0.5),\n"
        "  1.0);\n";

    /* Only the extension directives and the sampler type differ between the
       external (OES image) and regular texture variants. Note the larger
       fragment buffer (4096): the YUYV->RGB main code is the longest. */
    const char *extension_directives = external_texture ?
        "#extension GL_OES_EGL_image_external : enable \n"
        "#extension GL_OES_EGL_image_external_essl3 : require \n" : "";
    const char *sampler_type = external_texture ? "samplerExternalOES" : "sampler2D";

    char fragment_shader[4096];
    snprintf(fragment_shader, sizeof(fragment_shader),
        "#version 300 es \n"
        "%s"
        "precision highp float; \n"
        "in vec2 texcoords_out; \n"
        "uniform %s tex1; \n"
        "out vec4 FragColor; \n"
        "void main() \n"
        "{ \n"
        "%s"
        "} \n", extension_directives, sampler_type, main_code);

    if(gsr_shader_init(shader, egl, vertex_shader, fragment_shader) != 0)
        return -1;

    gsr_shader_bind_attribute_location(shader, "pos", 0);
    gsr_shader_bind_attribute_location(shader, "texcoords", 1);
    uniforms->offset = egl->glGetUniformLocation(shader->program_id, "offset");
    uniforms->rotation_matrix = egl->glGetUniformLocation(shader->program_id, "rotation_matrix");
    return 0;
}
/* Creates one framebuffer per destination texture (plane) and attaches the
   texture as its color attachment. Uses framebuffer 0 for the first plane
   (Y, or the whole image for RGB8) and framebuffer 1 for the UV plane when
   two destination textures are in use.
   Returns 0 on success, -1 if a framebuffer is incomplete. */
static int load_framebuffers(gsr_color_conversion *self) {
    /* TODO: Only generate the necessary amount of framebuffers (self->params.num_destination_textures) */
    const unsigned int draw_buffer = GL_COLOR_ATTACHMENT0;
    const char *plane_names[2] = { "Y", "UV" };
    const int num_framebuffers_used = self->params.num_destination_textures > 1 ? 2 : 1;

    self->params.egl->glGenFramebuffers(GSR_COLOR_CONVERSION_MAX_FRAMEBUFFERS, self->framebuffers);

    for(int plane = 0; plane < num_framebuffers_used; ++plane) {
        self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, self->framebuffers[plane]);
        self->params.egl->glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self->params.destination_textures[plane], 0);
        self->params.egl->glDrawBuffers(1, &draw_buffer);
        if(self->params.egl->glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
            fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to create framebuffer for %s\n", plane_names[plane]);
            self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
            return -1;
        }
    }

    self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
    return 0;
}
/* Allocates the vertex array and a dynamic vertex buffer for 6 vertices of
   interleaved (x, y, u, v) floats; the actual values are uploaded per draw
   via glBufferSubData. Attribute 0 is the 2D position, attribute 1 the 2D
   texture coordinates. Always returns 0. */
static int create_vertices(gsr_color_conversion *self) {
    gsr_egl *const egl = self->params.egl;
    const int floats_per_vertex = 4; /* vec2 position + vec2 texcoords */
    const int num_vertices = 6;      /* two triangles forming a quad */

    egl->glGenVertexArrays(1, &self->vertex_array_object_id);
    egl->glBindVertexArray(self->vertex_array_object_id);

    egl->glGenBuffers(1, &self->vertex_buffer_object_id);
    egl->glBindBuffer(GL_ARRAY_BUFFER, self->vertex_buffer_object_id);
    egl->glBufferData(GL_ARRAY_BUFFER, num_vertices * floats_per_vertex * sizeof(float), NULL, GL_DYNAMIC_DRAW);

    egl->glEnableVertexAttribArray(0);
    egl->glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, floats_per_vertex * sizeof(float), (void*)0);
    egl->glEnableVertexAttribArray(1);
    egl->glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, floats_per_vertex * sizeof(float), (void*)(2 * sizeof(float)));

    egl->glBindVertexArray(0);
    return 0;
}
/* Compiles the regular (non-external-texture) graphics shaders required by the
   destination pixel format: Y/UV plus YUYV variants for NV12/P010, RGB plus
   the YUYV variant for RGB8. Returns true on success, false (with an error
   printed) if any shader fails to compile. */
static bool gsr_color_conversion_load_graphics_shaders(gsr_color_conversion *self) {
    gsr_egl *const egl = self->params.egl;
    gsr_shader *const shaders = self->graphics_shaders;
    gsr_color_graphics_uniforms *const uniforms = self->graphics_uniforms;
    const gsr_destination_color dest_color = self->params.destination_color;
    const gsr_color_range range = self->params.color_range;

    switch(dest_color) {
        case GSR_DESTINATION_COLOR_NV12:
        case GSR_DESTINATION_COLOR_P010: {
            if(load_graphics_shader_y(&shaders[GRAPHICS_SHADER_INDEX_Y], egl, &uniforms[GRAPHICS_SHADER_INDEX_Y], dest_color, range, false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load Y graphics shader\n");
                return false;
            }
            if(load_graphics_shader_uv(&shaders[GRAPHICS_SHADER_INDEX_UV], egl, &uniforms[GRAPHICS_SHADER_INDEX_UV], dest_color, range, false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load UV graphics shader\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_y(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_Y], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_Y], false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to Y graphics shader\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_uv(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_UV], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_UV], false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to UV graphics shader\n");
                return false;
            }
            break;
        }
        case GSR_DESTINATION_COLOR_RGB8: {
            if(load_graphics_shader_rgb(&shaders[GRAPHICS_SHADER_INDEX_RGB], egl, &uniforms[GRAPHICS_SHADER_INDEX_RGB], false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load RGB graphics shader\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_rgb(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_RGB], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_RGB], false) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to RGB graphics shader\n");
                return false;
            }
            break;
        }
    }
    return true;
}
/* Compiles the external-texture (samplerExternalOES) variants of the graphics
   shaders for the destination pixel format. Mirrors
   gsr_color_conversion_load_graphics_shaders but with external_texture = true
   and the *_EXTERNAL shader slots. Returns true on success. */
static bool gsr_color_conversion_load_external_graphics_shaders(gsr_color_conversion *self) {
    gsr_egl *const egl = self->params.egl;
    gsr_shader *const shaders = self->graphics_shaders;
    gsr_color_graphics_uniforms *const uniforms = self->graphics_uniforms;
    const gsr_destination_color dest_color = self->params.destination_color;
    const gsr_color_range range = self->params.color_range;

    switch(dest_color) {
        case GSR_DESTINATION_COLOR_NV12:
        case GSR_DESTINATION_COLOR_P010: {
            if(load_graphics_shader_y(&shaders[GRAPHICS_SHADER_INDEX_Y_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_Y_EXTERNAL], dest_color, range, true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load Y graphics shader (external)\n");
                return false;
            }
            if(load_graphics_shader_uv(&shaders[GRAPHICS_SHADER_INDEX_UV_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_UV_EXTERNAL], dest_color, range, true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load UV graphics shader (external)\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_y(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_Y_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_Y_EXTERNAL], true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to Y graphics shader (external)\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_uv(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_UV_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_UV_EXTERNAL], true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to UV graphics shader (external)\n");
                return false;
            }
            break;
        }
        case GSR_DESTINATION_COLOR_RGB8: {
            if(load_graphics_shader_rgb(&shaders[GRAPHICS_SHADER_INDEX_RGB_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_RGB_EXTERNAL], true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load RGB graphics shader (external)\n");
                return false;
            }
            if(load_graphics_shader_yuyv_to_rgb(&shaders[GRAPHICS_SHADER_INDEX_YUYV_TO_RGB_EXTERNAL], egl, &uniforms[GRAPHICS_SHADER_INDEX_YUYV_TO_RGB_EXTERNAL], true) != 0) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: failed to load YUYV to RGB graphics shader (external)\n");
                return false;
            }
            break;
        }
    }
    return true;
}
/* Initializes the color converter: validates the destination texture count for
 * the requested destination format, compiles the required graphics shaders
 * (plus the optional external-image variants), creates one framebuffer per
 * destination texture and sets up the vertex layout.
 * Returns 0 on success, -1 on failure; on failure the object is fully
 * deinitialized and safe to discard.
 * Fix: removed the redundant `self->params.egl = params->egl;` — it was
 * immediately overwritten by the whole-struct copy on the next line. */
int gsr_color_conversion_init(gsr_color_conversion *self, const gsr_color_conversion_params *params) {
    assert(params);
    assert(params->egl);
    memset(self, 0, sizeof(*self));
    self->params = *params;

    /* NV12/P010 store luma and chroma in separate planes (2 textures);
       RGB8 is a single packed texture. */
    switch(self->params.destination_color) {
        case GSR_DESTINATION_COLOR_NV12:
        case GSR_DESTINATION_COLOR_P010: {
            if(self->params.num_destination_textures != 2) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: expected 2 destination textures for destination color NV12/P010, got %d destination texture(s)\n", self->params.num_destination_textures);
                goto err;
            }
            break;
        }
        case GSR_DESTINATION_COLOR_RGB8: {
            if(self->params.num_destination_textures != 1) {
                fprintf(stderr, "gsr error: gsr_color_conversion_init: expected 1 destination textures for destination color RGB8, got %d destination texture(s)\n", self->params.num_destination_textures);
                goto err;
            }
            break;
        }
    }

    if(!gsr_color_conversion_load_graphics_shaders(self))
        goto err;

    /* The samplerExternalOES shader variants are only compiled on request. */
    if(self->params.load_external_image_shader) {
        if(!gsr_color_conversion_load_external_graphics_shaders(self))
            goto err;
    }

    if(load_framebuffers(self) != 0)
        goto err;

    if(create_vertices(self) != 0)
        goto err;

    return 0;

    err:
    gsr_color_conversion_deinit(self);
    return -1;
}
/* Releases all GL resources owned by the converter (vertex buffer/array,
   framebuffers, shaders) and zeroes the handles. Safe to call on an object
   whose init failed or that was already deinitialized: a NULL egl pointer
   makes it a no-op. */
void gsr_color_conversion_deinit(gsr_color_conversion *self) {
    gsr_egl *const egl = self->params.egl;
    if(!egl)
        return;

    if(self->vertex_buffer_object_id) {
        egl->glDeleteBuffers(1, &self->vertex_buffer_object_id);
        self->vertex_buffer_object_id = 0;
    }

    if(self->vertex_array_object_id) {
        egl->glDeleteVertexArrays(1, &self->vertex_array_object_id);
        self->vertex_array_object_id = 0;
    }

    egl->glDeleteFramebuffers(GSR_COLOR_CONVERSION_MAX_FRAMEBUFFERS, self->framebuffers);
    for(int fb = 0; fb < GSR_COLOR_CONVERSION_MAX_FRAMEBUFFERS; ++fb)
        self->framebuffers[fb] = 0;

    for(int shader = 0; shader < GSR_COLOR_CONVERSION_MAX_GRAPHICS_SHADERS; ++shader)
        gsr_shader_deinit(&self->graphics_shaders[shader]);

    self->params.egl = NULL;
}
/* Fills in the 2D rotation matrix
       | cos(a)  -sin(a) |
       | sin(a)   cos(a) |
   for the given right-angle rotation. The cos/sin values of 0/90/180/270
   degrees are exact (-1, 0 or 1), so the matrix is built from constants
   instead of calling cosf/sinf — this avoids floating-point rounding error
   that would otherwise introduce slight blurring when sampling. */
static void gsr_color_conversion_apply_rotation(gsr_rotation rotation, float rotation_matrix[2][2]) {
    float cos_a = 1.0f;      /* cos(angle) */
    float sin_a = 0.0f;      /* sin(angle) */
    float neg_sin_a = 0.0f;  /* -sin(angle), written out to keep exact zeros */
    switch(rotation) {
        case GSR_ROT_0:
            cos_a = 1.0f;  sin_a = 0.0f;  neg_sin_a = 0.0f;
            break;
        case GSR_ROT_90:
            cos_a = 0.0f;  sin_a = 1.0f;  neg_sin_a = -1.0f;
            break;
        case GSR_ROT_180:
            cos_a = -1.0f; sin_a = 0.0f;  neg_sin_a = 0.0f;
            break;
        case GSR_ROT_270:
            cos_a = 0.0f;  sin_a = -1.0f; neg_sin_a = 1.0f;
            break;
    }
    rotation_matrix[0][0] = cos_a;
    rotation_matrix[0][1] = neg_sin_a;
    rotation_matrix[1][0] = sin_a;
    rotation_matrix[1][1] = cos_a;
}
/* For BGR sources, installs a texture swizzle so shaders read the channels
   back in RGB order. Undone by gsr_color_conversion_swizzle_reset().
   NOTE(review): the alpha swizzle is the raw value 1 (GL_ONE is 1), which
   forces alpha to 1.0, while the reset path restores GL_ALPHA — confirm the
   forced-opaque alpha is intentional. */
static void gsr_color_conversion_swizzle_texture_source(gsr_color_conversion *self, unsigned int texture_target, gsr_source_color source_color) {
    if(source_color != GSR_SOURCE_COLOR_BGR)
        return;
    const int bgr_to_rgb_swizzle[] = { GL_BLUE, GL_GREEN, GL_RED, 1 };
    self->params.egl->glTexParameteriv(texture_target, GL_TEXTURE_SWIZZLE_RGBA, bgr_to_rgb_swizzle);
}
/* Restores the default (identity) channel swizzle after a BGR source draw.
   No-op for any other source color, since no swizzle was installed. */
static void gsr_color_conversion_swizzle_reset(gsr_color_conversion *self, unsigned int texture_target, gsr_source_color source_color) {
    if(source_color != GSR_SOURCE_COLOR_BGR)
        return;
    const int identity_swizzle[] = { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA };
    self->params.egl->glTexParameteriv(texture_target, GL_TEXTURE_SWIZZLE_RGBA, identity_swizzle);
}
/* Executes one conversion pass: binds the destination framebuffer, activates
 * the selected graphics shader, uploads the rotation/offset uniforms and draws
 * the 6 vertices currently stored in the vertex buffer. */
static void gsr_color_conversion_draw_pass(gsr_color_conversion *self, int shader_index, int framebuffer_index, float rotation_matrix[2][2], vec2f pos_norm) {
    self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, self->framebuffers[framebuffer_index]);
    /* TODO: Clear in a separate clear_ function. We want to do that when using multiple drm to create the final image (multiple monitors for example) */
    gsr_shader_use(&self->graphics_shaders[shader_index]);
    self->params.egl->glUniformMatrix2fv(self->graphics_uniforms[shader_index].rotation_matrix, 1, GL_TRUE, (const float*)rotation_matrix);
    self->params.egl->glUniform2f(self->graphics_uniforms[shader_index].offset, pos_norm.x, pos_norm.y);
    self->params.egl->glDrawArrays(GL_TRIANGLES, 0, 6);
}

/* Draws (and color-converts) a rectangle of the source texture into the
 * destination texture(s). Builds a quad covering the destination rectangle,
 * uploads it to the shared vertex buffer, then runs one pass for RGB8
 * destinations or two passes (Y plane then UV plane) for NV12/P010.
 * Dimensions of 0 fall back to 1.0f in the normalizations to avoid division
 * by zero; an empty source region draws nothing.
 * Fix: the width/height swap for 90/270-degree rotations used a float
 * temporary for integer vec2i fields, which would lose precision for values
 * above 2^24; it now uses an int. Also deduplicated the per-pass GL calls
 * into gsr_color_conversion_draw_pass. */
static void gsr_color_conversion_draw_graphics(gsr_color_conversion *self, unsigned int texture_id, bool external_texture, gsr_rotation rotation, gsr_flip flip, float rotation_matrix[2][2], vec2i source_position, vec2i source_size, vec2i destination_pos, vec2i texture_size, vec2f scale, gsr_source_color source_color) {
    if(source_size.x == 0 || source_size.y == 0)
        return;

    const vec2i dest_texture_size = self->params.destination_textures_size[0];
    const unsigned int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;

    /* A 90/270-degree rotated source is on its side: swap width and height. */
    if(rotation == GSR_ROT_90 || rotation == GSR_ROT_270) {
        const int tmp = texture_size.x;
        texture_size.x = texture_size.y;
        texture_size.y = tmp;
    }

    self->params.egl->glBindTexture(texture_target, texture_id);
    gsr_color_conversion_swizzle_texture_source(self, texture_target, source_color);

    /* Destination rectangle in normalized device coordinates; the * 2.0f maps
       the [0,1] texture-relative range onto the [-1,1] NDC range. */
    const vec2f pos_norm = {
        ((float)destination_pos.x / (dest_texture_size.x == 0 ? 1.0f : (float)dest_texture_size.x)) * 2.0f,
        ((float)destination_pos.y / (dest_texture_size.y == 0 ? 1.0f : (float)dest_texture_size.y)) * 2.0f,
    };

    const vec2f size_norm = {
        ((float)source_size.x / (dest_texture_size.x == 0 ? 1.0f : (float)dest_texture_size.x)) * 2.0f * scale.x,
        ((float)source_size.y / (dest_texture_size.y == 0 ? 1.0f : (float)dest_texture_size.y)) * 2.0f * scale.y,
    };

    /* Source rectangle in normalized texture coordinates. */
    const vec2f texture_pos_norm = {
        (float)source_position.x / (texture_size.x == 0 ? 1.0f : (float)texture_size.x),
        (float)source_position.y / (texture_size.y == 0 ? 1.0f : (float)texture_size.y),
    };

    const vec2f texture_size_norm = {
        (float)source_size.x / (texture_size.x == 0 ? 1.0f : (float)texture_size.x),
        (float)source_size.y / (texture_size.y == 0 ? 1.0f : (float)texture_size.y),
    };

    /* Two triangles covering the destination rectangle; each vertex is
       (x, y, texture u, texture v). */
    float vertices[] = {
        -1.0f + 0.0f,               -1.0f + 0.0f + size_norm.y, texture_pos_norm.x,                       texture_pos_norm.y + texture_size_norm.y,
        -1.0f + 0.0f,               -1.0f + 0.0f,               texture_pos_norm.x,                       texture_pos_norm.y,
        -1.0f + 0.0f + size_norm.x, -1.0f + 0.0f,               texture_pos_norm.x + texture_size_norm.x, texture_pos_norm.y,

        -1.0f + 0.0f,               -1.0f + 0.0f + size_norm.y, texture_pos_norm.x,                       texture_pos_norm.y + texture_size_norm.y,
        -1.0f + 0.0f + size_norm.x, -1.0f + 0.0f,               texture_pos_norm.x + texture_size_norm.x, texture_pos_norm.y,
        -1.0f + 0.0f + size_norm.x, -1.0f + 0.0f + size_norm.y, texture_pos_norm.x + texture_size_norm.x, texture_pos_norm.y + texture_size_norm.y
    };

    /* Mirror the texture coordinates for flipped sources. */
    if(flip & GSR_FLIP_HORIZONTAL) {
        for(int i = 0; i < 6; ++i) {
            const float prev_x = vertices[i*4 + 2];
            vertices[i*4 + 2] = texture_pos_norm.x + texture_size_norm.x - prev_x;
        }
    }

    if(flip & GSR_FLIP_VERTICAL) {
        for(int i = 0; i < 6; ++i) {
            const float prev_y = vertices[i*4 + 3];
            vertices[i*4 + 3] = texture_pos_norm.y + texture_size_norm.y - prev_y;
        }
    }

    self->params.egl->glBindVertexArray(self->vertex_array_object_id);
    self->params.egl->glViewport(0, 0, dest_texture_size.x, dest_texture_size.y);

    /* TODO: this, also cleanup */
    self->params.egl->glBindBuffer(GL_ARRAY_BUFFER, self->vertex_buffer_object_id);
    self->params.egl->glBufferSubData(GL_ARRAY_BUFFER, 0, 24 * sizeof(float), vertices);

    /* Select shader(s) for the source -> destination format combination.
       NV12/P010 needs two passes (Y plane, then UV plane when a second
       destination texture exists); RGB8 needs one. */
    int first_pass_shader_index = -1;
    int second_pass_shader_index = -1;
    switch(source_color) {
        case GSR_SOURCE_COLOR_RGB:
        case GSR_SOURCE_COLOR_BGR: {
            switch(self->params.destination_color) {
                case GSR_DESTINATION_COLOR_NV12:
                case GSR_DESTINATION_COLOR_P010:
                    first_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_Y_EXTERNAL : GRAPHICS_SHADER_INDEX_Y;
                    second_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_UV_EXTERNAL : GRAPHICS_SHADER_INDEX_UV;
                    break;
                case GSR_DESTINATION_COLOR_RGB8:
                    first_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_RGB_EXTERNAL : GRAPHICS_SHADER_INDEX_RGB;
                    break;
            }
            break;
        }
        case GSR_SOURCE_COLOR_YUYV: {
            switch(self->params.destination_color) {
                case GSR_DESTINATION_COLOR_NV12:
                case GSR_DESTINATION_COLOR_P010:
                    first_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_YUYV_TO_Y_EXTERNAL : GRAPHICS_SHADER_INDEX_YUYV_TO_Y;
                    second_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_YUYV_TO_UV_EXTERNAL : GRAPHICS_SHADER_INDEX_YUYV_TO_UV;
                    break;
                case GSR_DESTINATION_COLOR_RGB8:
                    first_pass_shader_index = external_texture ? GRAPHICS_SHADER_INDEX_YUYV_TO_RGB_EXTERNAL : GRAPHICS_SHADER_INDEX_YUYV_TO_RGB;
                    break;
            }
            break;
        }
    }

    gsr_color_conversion_draw_pass(self, first_pass_shader_index, 0, rotation_matrix, pos_norm);
    if(second_pass_shader_index != -1 && self->params.num_destination_textures > 1)
        gsr_color_conversion_draw_pass(self, second_pass_shader_index, 1, rotation_matrix, pos_norm);

    self->params.egl->glBindVertexArray(0);
    self->params.egl->glUseProgram(0);
    gsr_color_conversion_swizzle_reset(self, texture_target, source_color);
    self->params.egl->glBindTexture(texture_target, 0);
    self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
/* Public entry point: converts and draws a source texture rectangle into the
   destination texture(s), applying the given rotation, flip and scaling.
   External (OES) textures require load_external_image_shader to have been set
   at init time; otherwise this logs an error and returns (asserts in debug). */
void gsr_color_conversion_draw(gsr_color_conversion *self, unsigned int texture_id, vec2i destination_pos, vec2i destination_size, vec2i source_pos, vec2i source_size, vec2i texture_size, gsr_rotation rotation, gsr_flip flip, gsr_source_color source_color, bool external_texture) {
    assert(!external_texture || self->params.load_external_image_shader);
    if(external_texture && !self->params.load_external_image_shader) {
        fprintf(stderr, "gsr error: gsr_color_conversion_draw: external texture not loaded\n");
        return;
    }

    /* Destination/source scale factor; stays (0,0) for an empty source. */
    vec2f scale = {0.0f, 0.0f};
    if(source_size.x > 0 && source_size.y > 0) {
        scale.x = (double)destination_size.x/(double)source_size.x;
        scale.y = (double)destination_size.y/(double)source_size.y;
    }

    float rotation_matrix[2][2] = {{0.0f, 0.0f}, {0.0f, 0.0f}};
    gsr_color_conversion_apply_rotation(rotation, rotation_matrix);

    const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
    self->params.egl->glBindTexture(texture_target, texture_id);

    const vec2i source_position = { source_pos.x, source_pos.y };
    gsr_color_conversion_draw_graphics(self, texture_id, external_texture, rotation, flip, rotation_matrix, source_position, source_size, destination_pos, texture_size, scale, source_color);

    self->params.egl->glFlush();
    // TODO: Use the minimal barrier required
    self->params.egl->glMemoryBarrier(GL_ALL_BARRIER_BITS); // GL_SHADER_IMAGE_ACCESS_BARRIER_BIT
    self->params.egl->glUseProgram(0);

    self->params.egl->glBindTexture(texture_target, 0);
}
/* Clears every destination plane to "black" for the destination format:
   the first plane (Y or RGB) is cleared to black with opaque alpha, and the
   second plane — when present — is cleared to the neutral chroma value
   (0.5, 0.5) for NV12/P010. */
void gsr_color_conversion_clear(gsr_color_conversion *self) {
    float plane0_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
    float plane1_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
    switch(self->params.destination_color) {
        case GSR_DESTINATION_COLOR_NV12:
        case GSR_DESTINATION_COLOR_P010: {
            /* 0.5 is the neutral (gray) chroma value for the UV plane. */
            plane1_color[0] = 0.5f;
            plane1_color[1] = 0.5f;
            plane1_color[2] = 0.0f;
            plane1_color[3] = 1.0f;
            break;
        }
        case GSR_DESTINATION_COLOR_RGB8:
            /* Single plane; plane1_color is unused but already black. */
            break;
    }

    const float *clear_colors[2] = { plane0_color, plane1_color };
    const int num_planes = self->params.num_destination_textures > 1 ? 2 : 1;
    for(int plane = 0; plane < num_planes; ++plane) {
        const float *c = clear_colors[plane];
        self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, self->framebuffers[plane]);
        self->params.egl->glClearColor(c[0], c[1], c[2], c[3]);
        self->params.egl->glClear(GL_COLOR_BUFFER_BIT);
    }

    self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
/* Reads back a rectangle of pixels from one destination texture by binding its
   framebuffer and calling glReadPixels. color_format and data_format are the
   GL format/type enums describing the layout of the output buffer; pixels must
   be large enough for width * height entries of that format. */
void gsr_color_conversion_read_destination_texture(gsr_color_conversion *self, int destination_texture_index, int x, int y, int width, int height, unsigned int color_format, unsigned int data_format, void *pixels) {
    assert(destination_texture_index >= 0 && destination_texture_index < self->params.num_destination_textures);
    gsr_egl *const egl = self->params.egl;
    egl->glBindFramebuffer(GL_FRAMEBUFFER, self->framebuffers[destination_texture_index]);
    egl->glReadPixels(x, y, width, height, color_format, data_format, pixels);
    egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
/* Maps a monitor rotation value to the generic rotation type with a direct cast.
   NOTE(review): this relies on gsr_monitor_rotation and gsr_rotation having
   identical enumerator ordering/values — confirm in the headers and keep the
   two enums in sync if either changes. */
gsr_rotation gsr_monitor_rotation_to_rotation(gsr_monitor_rotation monitor_rotation) {
    return (gsr_rotation)monitor_rotation;
}

119
src/cuda.c Normal file
View File

@ -0,0 +1,119 @@
#include "../include/cuda.h"
#include "../include/library_loader.h"
#include <string.h>
#include <stdio.h>
#include <dlfcn.h>
#include <assert.h>
/* Loads libcuda at runtime (dlopen), resolves the required driver-API symbols,
   initializes CUDA and creates a context on device 0. Optionally starts a GPU
   memory-transfer-rate overclock via xnvctrl when do_overclock is set and an
   X display is available. On success returns true with self->library and
   self->cu_ctx set; on failure returns false with *self zeroed (the library
   handle is closed). */
bool gsr_cuda_load(gsr_cuda *self, Display *display, bool do_overclock) {
    memset(self, 0, sizeof(gsr_cuda));
    self->do_overclock = do_overclock;

    dlerror(); /* clear */
    /* Prefer the versioned soname; fall back to the unversioned dev symlink. */
    void *lib = dlopen("libcuda.so.1", RTLD_LAZY);
    if(!lib) {
        lib = dlopen("libcuda.so", RTLD_LAZY);
        if(!lib) {
            fprintf(stderr, "gsr error: gsr_cuda_load failed: failed to load libcuda.so/libcuda.so.1, error: %s\n", dlerror());
            return false;
        }
    }

    /* All of these must resolve; dlsym_load_list fails if any is missing. */
    const dlsym_assign required_dlsym[] = {
        { (void**)&self->cuInit, "cuInit" },
        { (void**)&self->cuDeviceGetCount, "cuDeviceGetCount" },
        { (void**)&self->cuDeviceGet, "cuDeviceGet" },
        { (void**)&self->cuCtxCreate_v2, "cuCtxCreate_v2" },
        { (void**)&self->cuCtxDestroy_v2, "cuCtxDestroy_v2" },
        { (void**)&self->cuCtxPushCurrent_v2, "cuCtxPushCurrent_v2" },
        { (void**)&self->cuCtxPopCurrent_v2, "cuCtxPopCurrent_v2" },
        { (void**)&self->cuGetErrorString, "cuGetErrorString" },
        { (void**)&self->cuMemcpy2D_v2, "cuMemcpy2D_v2" },
        { (void**)&self->cuMemcpy2DAsync_v2, "cuMemcpy2DAsync_v2" },
        { (void**)&self->cuStreamSynchronize, "cuStreamSynchronize" },
        { (void**)&self->cuGraphicsGLRegisterImage, "cuGraphicsGLRegisterImage" },
        { (void**)&self->cuGraphicsEGLRegisterImage, "cuGraphicsEGLRegisterImage" },
        { (void**)&self->cuGraphicsResourceSetMapFlags, "cuGraphicsResourceSetMapFlags" },
        { (void**)&self->cuGraphicsMapResources, "cuGraphicsMapResources" },
        { (void**)&self->cuGraphicsUnmapResources, "cuGraphicsUnmapResources" },
        { (void**)&self->cuGraphicsUnregisterResource, "cuGraphicsUnregisterResource" },
        { (void**)&self->cuGraphicsSubResourceGetMappedArray, "cuGraphicsSubResourceGetMappedArray" },

        { NULL, NULL }
    };

    CUresult res;

    if(!dlsym_load_list(lib, required_dlsym)) {
        fprintf(stderr, "gsr error: gsr_cuda_load failed: missing required symbols in libcuda.so/libcuda.so.1\n");
        goto fail;
    }

    res = self->cuInit(0);
    if(res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        self->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: gsr_cuda_load failed: cuInit failed, error: %s (result: %d)\n", err_str, res);
        goto fail;
    }

    int nGpu = 0;
    self->cuDeviceGetCount(&nGpu);
    if(nGpu <= 0) {
        fprintf(stderr, "gsr error: gsr_cuda_load failed: no cuda supported devices found\n");
        goto fail;
    }

    // TODO: Use the device associated with the opengl graphics context
    CUdevice cu_dev;
    res = self->cuDeviceGet(&cu_dev, 0);
    if(res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        self->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: gsr_cuda_load failed: unable to get CUDA device, error: %s (result: %d)\n", err_str, res);
        goto fail;
    }

    res = self->cuCtxCreate_v2(&self->cu_ctx, CU_CTX_SCHED_AUTO, cu_dev);
    if(res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        self->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: gsr_cuda_load failed: unable to create CUDA context, error: %s (result: %d)\n", err_str, res);
        goto fail;
    }

    /* Overclocking needs xnvctrl, which requires a running X server; a failed
       overclock is a warning only, never a load failure. */
    if(self->do_overclock && display) {
        if(gsr_overclock_load(&self->overclock, display))
            gsr_overclock_start(&self->overclock);
        else
            fprintf(stderr, "gsr warning: gsr_cuda_load: failed to load xnvctrl, failed to overclock memory transfer rate\n");
    } else if(self->do_overclock && !display) {
        fprintf(stderr, "gsr warning: gsr_cuda_load: overclocking enabled but no X server is running. Overclocking has been disabled\n");
    }

    self->library = lib;
    return true;

    fail:
    /* Cleanup: close the library handle and leave *self zeroed so callers can
       tell nothing was loaded. */
    dlclose(lib);
    memset(self, 0, sizeof(gsr_cuda));
    return false;
}
/* Tears down the state created by gsr_cuda_load: stops/unloads the overclock
 * helper (only if xnvctrl was actually loaded), destroys the CUDA context and
 * closes libcuda. Leaves |self| zeroed so it can be loaded again. */
void gsr_cuda_unload(gsr_cuda *self) {
    if(self->do_overclock && self->overclock.xnvctrl.library) {
        gsr_overclock_stop(&self->overclock);
        gsr_overclock_unload(&self->overclock);
    }

    if(self->library) {
        if(self->cu_ctx) {
            self->cuCtxDestroy_v2(self->cu_ctx);
            self->cu_ctx = 0;
        }
        dlclose(self->library);
    }

    memset(self, 0, sizeof(gsr_cuda));
}

144
src/cursor.c Normal file
View File

@ -0,0 +1,144 @@
#include "../include/cursor.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <X11/extensions/Xfixes.h>
// TODO: Test cursor visibility with XFixesHideCursor
/* Uploads |x11_cursor_image| (XFixes cursor pixels: one pixel per unsigned
 * long, premultiplied alpha, B/G/R/A byte order on little-endian) into
 * self->texture_id as un-premultiplied RGBA8. Sets |*visible| to true if any
 * pixel has non-zero alpha. Takes ownership of |x11_cursor_image| and frees
 * it with XFree in all paths. Returns false on invalid input or allocation
 * failure. */
static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorImage *x11_cursor_image, bool *visible) {
    uint8_t *cursor_data = NULL;
    uint8_t *out = NULL;
    *visible = false;

    if(!x11_cursor_image)
        goto err;

    if(!x11_cursor_image->pixels)
        goto err;

    self->hotspot.x = x11_cursor_image->xhot;
    self->hotspot.y = x11_cursor_image->yhot;
    self->egl->glBindTexture(GL_TEXTURE_2D, self->texture_id);

    self->size.x = x11_cursor_image->width;
    self->size.y = x11_cursor_image->height;
    const unsigned long *pixels = x11_cursor_image->pixels;
    cursor_data = malloc(self->size.x * self->size.y * 4);
    if(!cursor_data)
        goto err;
    out = cursor_data;

    /* Un-premultiply alpha and swizzle BGRA -> RGBA */
    for(int y = 0; y < self->size.y; ++y) {
        for(int x = 0; x < self->size.x; ++x) {
            uint32_t pixel = *pixels++;
            uint8_t *in = (uint8_t*)&pixel;

            uint8_t alpha = in[3];
            if(alpha == 0) {
                /* Avoid division by zero; for valid premultiplied data the rgb
                   channels are 0 when alpha is 0, so the result stays 0 */
                alpha = 1;
            } else {
                *visible = true;
            }

            out[0] = (float)in[2] * 255.0/(float)alpha;
            out[1] = (float)in[1] * 255.0/(float)alpha;
            out[2] = (float)in[0] * 255.0/(float)alpha;
            out[3] = in[3];
            /* Note: the previous `in += 4;` here was dead code — |in| is
               re-derived from |pixel| at the top of every iteration */
            out += 4;
        }
    }

    // TODO: glTextureSubImage2D if same size
    self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->size.x, self->size.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, cursor_data);
    free(cursor_data);

    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

    self->egl->glBindTexture(GL_TEXTURE_2D, 0);
    XFree(x11_cursor_image);
    return true;

    err:
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);
    if(x11_cursor_image)
        XFree(x11_cursor_image);
    return false;
}
/* Initializes |self| for cursor capture on |display|: creates the GL texture
 * that will hold the cursor image and subscribes to XFixes cursor-change
 * notifications on the root window.
 * Returns 0 on success, -1 if the XFixes extension is missing. */
int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display) {
    int x_fixes_error_base = 0;

    assert(egl);
    assert(display);
    memset(self, 0, sizeof(*self));
    self->egl = egl;
    self->display = display;

    self->x_fixes_event_base = 0;
    if(!XFixesQueryExtension(self->display, &self->x_fixes_event_base, &x_fixes_error_base)) {
        fprintf(stderr, "gsr error: gsr_cursor_init: your X11 server is missing the XFixes extension\n");
        gsr_cursor_deinit(self);
        return -1;
    }

    self->egl->glGenTextures(1, &self->texture_id);

    XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), XFixesDisplayCursorNotifyMask);
    /* NOTE(review): the return value of the initial image upload is ignored
       and cursor_image_set is set to true regardless — a failed first upload
       will only be retried on the next cursor-change event; confirm intended. */
    gsr_cursor_set_from_x11_cursor_image(self, XFixesGetCursorImage(self->display), &self->visible);
    self->cursor_image_set = true;

    return 0;
}
/* Releases the cursor texture and unsubscribes from XFixes cursor events.
 * Safe to call on an already-deinitialized (zeroed) |self|. */
void gsr_cursor_deinit(gsr_cursor *self) {
    if(!self->display)
        return;

    if(self->texture_id) {
        self->egl->glDeleteTextures(1, &self->texture_id);
        self->texture_id = 0;
    }

    /* display is guaranteed non-NULL here by the early return above, so the
       previous redundant `if(self->display)` guard has been removed */
    XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), 0);

    self->display = NULL;
    self->egl = NULL;
}
/* Handles an X11 event. When it signals that the root-window cursor changed,
   the cursor image is re-fetched. Returns true if the image was updated. */
bool gsr_cursor_on_event(gsr_cursor *self, XEvent *xev) {
    if(!self->display)
        return false;

    if(xev->type == self->x_fixes_event_base + XFixesCursorNotify) {
        const XFixesCursorNotifyEvent *notify = (const XFixesCursorNotifyEvent*)xev;
        const bool root_cursor_changed =
            notify->subtype == XFixesDisplayCursorNotify &&
            notify->window == DefaultRootWindow(self->display);
        if(root_cursor_changed)
            self->cursor_image_set = false;
    }

    if(self->cursor_image_set)
        return false;

    self->cursor_image_set = true;
    gsr_cursor_set_from_x11_cursor_image(self, XFixesGetCursorImage(self->display), &self->visible);
    return true;
}
/* Refreshes self->position with the pointer location relative to |relative_to|. */
void gsr_cursor_tick(gsr_cursor *self, Window relative_to) {
    if(!self->display)
        return;

    Window root_return, child_return;
    int root_x, root_y;
    unsigned int mask_return;
    XQueryPointer(self->display, relative_to, &root_return, &child_return,
        &root_x, &root_y, &self->position.x, &self->position.y, &mask_return);
}

475
src/damage.c Normal file
View File

@ -0,0 +1,475 @@
#include "../include/damage.h"
#include "../include/utils.h"
#include "../include/window/window.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <X11/extensions/Xdamage.h>
#include <X11/extensions/Xrandr.h>
/* Axis-aligned rectangle: top-left corner and size in pixels. */
typedef struct {
    vec2i pos;
    vec2i size;
} gsr_rectangle;

/* Returns true when the two rectangles overlap (edge-touching does not count). */
static bool rectangles_intersect(gsr_rectangle rect1, gsr_rectangle rect2) {
    const bool overlap_x = rect1.pos.x < rect2.pos.x + rect2.size.x && rect2.pos.x < rect1.pos.x + rect1.size.x;
    const bool overlap_y = rect1.pos.y < rect2.pos.y + rect2.size.y && rect2.pos.y < rect1.pos.y + rect1.size.y;
    return overlap_x && overlap_y;
}
/* Returns true if the X server supports XRandr version 1.2 or newer. */
static bool xrandr_is_supported(Display *display) {
    int major = 0;
    int minor = 0;
    if(!XRRQueryVersion(display, &major, &minor))
        return false;
    if(major > 1)
        return true;
    return major == 1 && minor >= 2;
}
/* Returns the index of |monitor_name| in self->monitors_tracked, or -1 when
   that monitor is not being tracked. */
static int gsr_damage_get_tracked_monitor_index(const gsr_damage *self, const char *monitor_name) {
    int index = 0;
    while(index < self->num_monitors_tracked) {
        if(strcmp(self->monitors_tracked[index].monitor_name, monitor_name) == 0)
            return index;
        ++index;
    }
    return -1;
}
/* for_each_active_monitor_output_x11_not_cached callback: appends |monitor|
 * (with an owned copy of its name) to self->monitors, and if a tracker with
 * the same name exists, re-points the tracker at the new array entry.
 * NOTE(review): trackers whose monitor is NOT re-enumerated keep their
 * previous |monitor| pointer into self->monitors, whose slots may have been
 * overwritten by a rescan — confirm stale pointers cannot be dereferenced. */
static void add_monitor_callback(const gsr_monitor *monitor, void *userdata) {
    gsr_damage *self = userdata;

    const int damage_monitor_index = gsr_damage_get_tracked_monitor_index(self, monitor->name);
    gsr_damage_monitor *damage_monitor = NULL;
    if(damage_monitor_index != -1) {
        damage_monitor = &self->monitors_tracked[damage_monitor_index];
        /* Cleared until the new entry is appended below; stays NULL if the
           append fails, which downstream code treats as "not linked" */
        damage_monitor->monitor = NULL;
    }

    if(self->num_monitors + 1 > GSR_DAMAGE_MAX_MONITORS) {
        fprintf(stderr, "gsr error: gsr_damage_on_output_change: max monitors reached\n");
        return;
    }

    /* self->monitors owns the name copy; freed in gsr_damage_deinit_monitors */
    char *monitor_name_copy = strdup(monitor->name);
    if(!monitor_name_copy) {
        fprintf(stderr, "gsr error: gsr_damage_on_output_change: strdup failed for monitor: %s\n", monitor->name);
        return;
    }

    self->monitors[self->num_monitors] = *monitor;
    self->monitors[self->num_monitors].name = monitor_name_copy;
    ++self->num_monitors;

    if(damage_monitor)
        damage_monitor->monitor = &self->monitors[self->num_monitors - 1];
}
/* Initializes damage tracking for the X11 display backing |egl|'s window.
 * Requires the XDamage extension and XRandr >= 1.2. Subscribes to
 * screen/crtc/output change notifications, creates a whole-root-window damage
 * object and enumerates the active monitors into self->monitors.
 * |cursor| is only consulted when |track_cursor| is true.
 * Returns false (after cleanup) on Wayland or when an extension is missing. */
bool gsr_damage_init(gsr_damage *self, gsr_egl *egl, gsr_cursor *cursor, bool track_cursor) {
    memset(self, 0, sizeof(*self));
    self->egl = egl;
    self->track_cursor = track_cursor;
    self->cursor = cursor;

    if(gsr_window_get_display_server(egl->window) != GSR_DISPLAY_SERVER_X11) {
        fprintf(stderr, "gsr error: gsr_damage_init: damage tracking is not supported on wayland\n");
        return false;
    }
    self->display = gsr_window_get_display(egl->window);

    if(!XDamageQueryExtension(self->display, &self->damage_event, &self->damage_error)) {
        fprintf(stderr, "gsr error: gsr_damage_init: XDamage is not supported by your X11 server\n");
        gsr_damage_deinit(self);
        return false;
    }

    if(!XRRQueryExtension(self->display, &self->randr_event, &self->randr_error)) {
        fprintf(stderr, "gsr error: gsr_damage_init: XRandr is not supported by your X11 server\n");
        gsr_damage_deinit(self);
        return false;
    }

    if(!xrandr_is_supported(self->display)) {
        fprintf(stderr, "gsr error: gsr_damage_init: your X11 randr version is too old\n");
        gsr_damage_deinit(self);
        return false;
    }

    XRRSelectInput(self->display, DefaultRootWindow(self->display), RRScreenChangeNotifyMask | RRCrtcChangeNotifyMask | RROutputChangeNotifyMask);

    /* Whole-screen damage object, used when tracking all monitors */
    self->monitor_damage = XDamageCreate(self->display, DefaultRootWindow(self->display), XDamageReportNonEmpty);
    if(!self->monitor_damage) {
        fprintf(stderr, "gsr error: gsr_damage_init: XDamageCreate failed\n");
        gsr_damage_deinit(self);
        return false;
    }
    /* Discard any damage accumulated before we started listening */
    XDamageSubtract(self->display, self->monitor_damage, None, None);

    for_each_active_monitor_output_x11_not_cached(self->display, add_monitor_callback, self);

    /* Force the first frame to be considered damaged so it gets captured */
    self->damaged = true;
    return true;
}
/* Frees the monitor name copies owned by self->monitors and empties the list. */
static void gsr_damage_deinit_monitors(gsr_damage *self) {
    while(self->num_monitors > 0) {
        --self->num_monitors;
        free((char*)self->monitors[self->num_monitors].name);
    }
}
/* Releases all damage-tracking resources: the whole-screen damage object,
 * tracked monitor/window entries and the enumerated monitor list.
 * Zeroes the extension event bases so other entry points become no-ops. */
void gsr_damage_deinit(gsr_damage *self) {
    if(self->monitor_damage) {
        XDamageDestroy(self->display, self->monitor_damage);
        self->monitor_damage = None;
    }

    for(int i = 0; i < self->num_monitors_tracked; ++i) {
        free(self->monitors_tracked[i].monitor_name);
    }
    self->num_monitors_tracked = 0;

    for(int i = 0; i < self->num_windows_tracked; ++i) {
        /* Stop listening on the window before destroying its damage object */
        XSelectInput(self->display, self->windows_tracked[i].window_id, 0);
        XDamageDestroy(self->display, self->windows_tracked[i].damage);
    }
    self->num_windows_tracked = 0;

    self->all_monitors_tracked_refcount = 0;
    gsr_damage_deinit_monitors(self);

    self->damage_event = 0;
    self->damage_error = 0;

    self->randr_event = 0;
    self->randr_error = 0;
}
/* Returns the index of |window| in self->windows_tracked, or -1 when that
   window is not being tracked. */
static int gsr_damage_get_tracked_window_index(const gsr_damage *self, int64_t window) {
    int index = 0;
    while(index < self->num_windows_tracked) {
        if(self->windows_tracked[index].window_id == window)
            return index;
        ++index;
    }
    return -1;
}
/* Starts damage-tracking |window|. Repeated calls for an already-tracked
 * window bump its refcount instead of creating a duplicate tracker.
 * Returns false if damage is unavailable, |window| is None, the tracker table
 * is full or XDamageCreate fails. A failure to read the window attributes
 * only logs a warning; tracking proceeds with zeroed geometry. */
bool gsr_damage_start_tracking_window(gsr_damage *self, int64_t window) {
    if(self->damage_event == 0 || window == None)
        return false;

    const int damage_window_index = gsr_damage_get_tracked_window_index(self, window);
    if(damage_window_index != -1) {
        ++self->windows_tracked[damage_window_index].refcount;
        return true;
    }

    if(self->num_windows_tracked + 1 > GSR_DAMAGE_MAX_TRACKED_TARGETS) {
        fprintf(stderr, "gsr error: gsr_damage_start_tracking_window: max window targets reached\n");
        return false;
    }

    XWindowAttributes win_attr;
    win_attr.x = 0;
    win_attr.y = 0;
    win_attr.width = 0;
    win_attr.height = 0;
    if(!XGetWindowAttributes(self->display, window, &win_attr))
        fprintf(stderr, "gsr warning: gsr_damage_start_tracking_window failed: failed to get window attributes: %ld\n", (long)window);

    const Damage damage = XDamageCreate(self->display, window, XDamageReportNonEmpty);
    if(!damage) {
        fprintf(stderr, "gsr error: gsr_damage_start_tracking_window: XDamageCreate failed\n");
        return false;
    }
    /* Discard any damage accumulated before tracking started */
    XDamageSubtract(self->display, damage, None, None);
    /* ConfigureNotify/Expose events keep the cached geometry in sync */
    XSelectInput(self->display, window, StructureNotifyMask | ExposureMask);

    gsr_damage_window *damage_window = &self->windows_tracked[self->num_windows_tracked];
    ++self->num_windows_tracked;

    damage_window->window_id = window;
    damage_window->window_pos.x = win_attr.x;
    damage_window->window_pos.y = win_attr.y;
    damage_window->window_size.x = win_attr.width;
    damage_window->window_size.y = win_attr.height;
    damage_window->damage = damage;
    damage_window->refcount = 1;
    return true;
}
/* Decrements tracking for |window|; when the refcount hits zero the damage
   object is destroyed and the entry is removed (swap with the last entry). */
void gsr_damage_stop_tracking_window(gsr_damage *self, int64_t window) {
    if(window == None)
        return;

    const int index = gsr_damage_get_tracked_window_index(self, window);
    if(index == -1)
        return;

    gsr_damage_window *tracked = &self->windows_tracked[index];
    if(--tracked->refcount > 0)
        return;

    XSelectInput(self->display, tracked->window_id, 0);
    XDamageDestroy(self->display, tracked->damage);
    --self->num_windows_tracked;
    self->windows_tracked[index] = self->windows_tracked[self->num_windows_tracked];
}
/* Finds the enumerated monitor whose identifier equals |id|, or NULL. */
static gsr_monitor* gsr_damage_get_monitor_by_id(gsr_damage *self, RRCrtc id) {
    gsr_monitor *const end = self->monitors + self->num_monitors;
    for(gsr_monitor *monitor = self->monitors; monitor != end; ++monitor) {
        if(monitor->monitor_identifier == id)
            return monitor;
    }
    return NULL;
}
/* Finds the enumerated monitor with connector name |name|, or NULL. */
static gsr_monitor* gsr_damage_get_monitor_by_name(gsr_damage *self, const char *name) {
    gsr_monitor *const end = self->monitors + self->num_monitors;
    for(gsr_monitor *monitor = self->monitors; monitor != end; ++monitor) {
        if(strcmp(monitor->name, name) == 0)
            return monitor;
    }
    return NULL;
}
/* Starts damage-tracking the monitor named |monitor_name|. The special names
 * "screen-direct"/"screen-direct-force" select all-monitors mode, tracked via
 * a shared refcount. Repeated calls for the same monitor bump its refcount.
 * Returns false if damage is unavailable, the tracker table is full, the
 * monitor does not exist or allocation fails. */
bool gsr_damage_start_tracking_monitor(gsr_damage *self, const char *monitor_name) {
    if(self->damage_event == 0)
        return false;

    if(strcmp(monitor_name, "screen-direct") == 0 || strcmp(monitor_name, "screen-direct-force") == 0)
        monitor_name = NULL;

    if(!monitor_name) {
        ++self->all_monitors_tracked_refcount;
        return true;
    }

    const int damage_monitor_index = gsr_damage_get_tracked_monitor_index(self, monitor_name);
    if(damage_monitor_index != -1) {
        ++self->monitors_tracked[damage_monitor_index].refcount;
        return true;
    }

    if(self->num_monitors_tracked + 1 > GSR_DAMAGE_MAX_TRACKED_TARGETS) {
        fprintf(stderr, "gsr error: gsr_damage_start_tracking_monitor: max monitor targets reached\n");
        return false;
    }

    /* Look the monitor up before allocating, so the not-found path does not
       need to allocate and free a name copy (previously strdup came first) */
    gsr_monitor *monitor = gsr_damage_get_monitor_by_name(self, monitor_name);
    if(!monitor) {
        fprintf(stderr, "gsr error: gsr_damage_start_tracking_monitor: failed to find monitor: %s\n", monitor_name);
        return false;
    }

    /* Owned copy; freed when the tracker is removed or on deinit */
    char *monitor_name_copy = strdup(monitor_name);
    if(!monitor_name_copy) {
        fprintf(stderr, "gsr error: gsr_damage_start_tracking_monitor: strdup failed for monitor: %s\n", monitor_name);
        return false;
    }

    gsr_damage_monitor *damage_monitor = &self->monitors_tracked[self->num_monitors_tracked];
    ++self->num_monitors_tracked;

    damage_monitor->monitor_name = monitor_name_copy;
    damage_monitor->monitor = monitor;
    damage_monitor->refcount = 1;
    return true;
}
/* Decrements tracking for |monitor_name|. The special all-monitors names map
   to the shared refcount (clamped at zero). When a monitor's refcount hits
   zero its entry is removed (swap with last) and the owned name is freed. */
void gsr_damage_stop_tracking_monitor(gsr_damage *self, const char *monitor_name) {
    if(strcmp(monitor_name, "screen-direct") == 0 || strcmp(monitor_name, "screen-direct-force") == 0)
        monitor_name = NULL;

    if(!monitor_name) {
        if(self->all_monitors_tracked_refcount > 0)
            --self->all_monitors_tracked_refcount;
        return;
    }

    const int index = gsr_damage_get_tracked_monitor_index(self, monitor_name);
    if(index == -1)
        return;

    gsr_damage_monitor *tracked = &self->monitors_tracked[index];
    if(--tracked->refcount > 0)
        return;

    free(tracked->monitor_name);
    --self->num_monitors_tracked;
    self->monitors_tracked[index] = self->monitors_tracked[self->num_monitors_tracked];
}
/* Updates the cached position/size of the monitor whose CRTC changed. */
static void gsr_damage_on_crtc_change(gsr_damage *self, XEvent *xev) {
    const XRRCrtcChangeNotifyEvent *crtc_event = (const XRRCrtcChangeNotifyEvent*)xev;
    if(crtc_event->crtc == 0 || crtc_event->width == 0 || crtc_event->height == 0)
        return;

    gsr_monitor *monitor = gsr_damage_get_monitor_by_id(self, crtc_event->crtc);
    if(!monitor)
        return;

    const bool pos_changed = crtc_event->x != monitor->pos.x || crtc_event->y != monitor->pos.y;
    const bool size_changed = (int)crtc_event->width != monitor->size.x || (int)crtc_event->height != monitor->size.y;
    if(pos_changed || size_changed) {
        monitor->pos.x = crtc_event->x;
        monitor->pos.y = crtc_event->y;

        monitor->size.x = crtc_event->width;
        monitor->size.y = crtc_event->height;
    }
}
/* Re-enumerates all active monitors after an output change (monitor plugged,
 * unplugged or reconfigured). add_monitor_callback re-links existing trackers
 * to their new array entries by name. */
static void gsr_damage_on_output_change(gsr_damage *self, XEvent *xev) {
    const XRROutputChangeNotifyEvent *rr_output_change_event = (XRROutputChangeNotifyEvent*)xev;
    if(!rr_output_change_event->output)
        return;

    gsr_damage_deinit_monitors(self);
    for_each_active_monitor_output_x11_not_cached(self->display, add_monitor_callback, self);
}
/* Dispatches XRandr notify sub-events to the matching handler. */
static void gsr_damage_on_randr_event(gsr_damage *self, XEvent *xev) {
    const XRRNotifyEvent *rr_event = (const XRRNotifyEvent*)xev;
    if(rr_event->subtype == RRNotify_CrtcChange)
        gsr_damage_on_crtc_change(self, xev);
    else if(rr_event->subtype == RRNotify_OutputChange)
        gsr_damage_on_output_change(self, xev);
}
/* Handles an XDamage notify: repairs the damage (so future changes keep
 * generating events) and decides whether any tracked target was affected.
 * Check order: all-monitors mode -> tracked windows (matched by drawable id)
 * -> intersection of the damaged rectangles with tracked monitor regions. */
static void gsr_damage_on_damage_event(gsr_damage *self, XEvent *xev) {
    const XDamageNotifyEvent *de = (XDamageNotifyEvent*)xev;
    XserverRegion region = XFixesCreateRegion(self->display, NULL, 0);
    /* Subtract all the damage, repairing the window */
    XDamageSubtract(self->display, de->damage, None, region);

    if(self->all_monitors_tracked_refcount > 0)
        self->damaged = true;

    if(!self->damaged) {
        for(int i = 0; i < self->num_windows_tracked; ++i) {
            const gsr_damage_window *damage_window = &self->windows_tracked[i];
            if(damage_window->window_id == (int64_t)de->drawable) {
                self->damaged = true;
                break;
            }
        }
    }

    if(!self->damaged) {
        int num_rectangles = 0;
        XRectangle *rectangles = XFixesFetchRegion(self->display, region, &num_rectangles);
        if(rectangles) {
            for(int i = 0; i < num_rectangles; ++i) {
                const gsr_rectangle damage_region = { (vec2i){rectangles[i].x, rectangles[i].y}, (vec2i){rectangles[i].width, rectangles[i].height} };
                for(int j = 0; j < self->num_monitors_tracked; ++j) {
                    const gsr_monitor *monitor = self->monitors_tracked[j].monitor;
                    /* Tracker not currently linked to an enumerated monitor */
                    if(!monitor)
                        continue;

                    const gsr_rectangle monitor_region = { monitor->pos, monitor->size };
                    self->damaged = rectangles_intersect(monitor_region, damage_region);
                    if(self->damaged)
                        goto intersection_found;
                }
            }
            /* Break out of both loops once any intersection has been found */
            intersection_found:
            XFree(rectangles);
        }
    }

    XFixesDestroyRegion(self->display, region);
    XFlush(self->display);
}
/* Marks the state damaged when the cursor moved since the last tick and its
 * rectangle overlaps a tracked window or monitor (or all-monitors mode is on).
 * NOTE(review): the overlap test uses the raw cursor position without
 * subtracting the hotspot — confirm this is the intended region. */
static void gsr_damage_on_tick_cursor(gsr_damage *self) {
    /* No movement since the last tick, nothing to do */
    if(self->cursor->position.x == self->cursor_pos.x && self->cursor->position.y == self->cursor_pos.y)
        return;

    self->cursor_pos = self->cursor->position;
    const gsr_rectangle cursor_region = { self->cursor->position, self->cursor->size };

    if(self->all_monitors_tracked_refcount > 0)
        self->damaged = true;

    if(!self->damaged) {
        for(int i = 0; i < self->num_windows_tracked; ++i) {
            const gsr_damage_window *damage_window = &self->windows_tracked[i];
            const gsr_rectangle window_region = { damage_window->window_pos, damage_window->window_size };
            if(rectangles_intersect(window_region, cursor_region)) {
                self->damaged = true;
                break;
            }
        }
    }

    if(!self->damaged) {
        for(int i = 0; i < self->num_monitors_tracked; ++i) {
            const gsr_monitor *monitor = self->monitors_tracked[i].monitor;
            /* Tracker not currently linked to an enumerated monitor */
            if(!monitor)
                continue;

            const gsr_rectangle monitor_region = { monitor->pos, monitor->size };
            if(rectangles_intersect(monitor_region, cursor_region)) {
                self->damaged = true;
                break;
            }
        }
    }
}
/* Caches the new geometry of a tracked window when it moves or resizes. */
static void gsr_damage_on_window_configure_notify(gsr_damage *self, XEvent *xev) {
    const int64_t configured_window = (int64_t)xev->xconfigure.window;
    for(int i = 0; i < self->num_windows_tracked; ++i) {
        gsr_damage_window *tracked = &self->windows_tracked[i];
        if(tracked->window_id != configured_window)
            continue;

        tracked->window_pos.x = xev->xconfigure.x;
        tracked->window_pos.y = xev->xconfigure.y;
        tracked->window_size.x = xev->xconfigure.width;
        tracked->window_size.y = xev->xconfigure.height;
        return;
    }
}
/* Feeds an X11 event into damage tracking: tracked-window geometry updates,
 * XRandr configuration changes and XDamage notifications. No-op when damage
 * tracking failed to initialize. */
void gsr_damage_on_event(gsr_damage *self, XEvent *xev) {
    if(self->damage_event == 0)
        return;

    if(xev->type == ConfigureNotify)
        gsr_damage_on_window_configure_notify(self, xev);

    if(self->randr_event) {
        if(xev->type == self->randr_event + RRScreenChangeNotify)
            XRRUpdateConfiguration(xev);

        if(xev->type == self->randr_event + RRNotify)
            gsr_damage_on_randr_event(self, xev);
    }

    /* damage_event is known non-zero here (checked at the top), so the
       previous redundant `self->damage_event &&` guard has been dropped */
    if(xev->type == self->damage_event + XDamageNotify)
        gsr_damage_on_damage_event(self, xev);
}
/* Per-frame update: when cursor tracking is enabled and the cursor is
   visible, checks whether cursor movement damaged a tracked region. */
void gsr_damage_tick(gsr_damage *self) {
    if(self->damage_event == 0)
        return;

    const bool cursor_may_cause_damage = self->track_cursor && self->cursor->visible;
    if(cursor_may_cause_damage && !self->damaged)
        gsr_damage_on_tick_cursor(self);
}
/* Returns true if a new frame should be captured. Always true when damage
   tracking is unavailable (capture every frame). */
bool gsr_damage_is_damaged(gsr_damage *self) {
    if(self->damage_event == 0)
        return true;
    return self->damaged;
}
/* Resets the damaged flag after the consumer has processed the frame. */
void gsr_damage_clear(gsr_damage *self) {
    self->damaged = false;
}

913
src/dbus.c Normal file
View File

@ -0,0 +1,913 @@
#include "../include/dbus.h"
#include <sys/random.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
/* TODO: Make non-blocking when GPU Screen Recorder is turned into a library */
/* TODO: Make sure responses matches the requests */
#define DESKTOP_PORTAL_SIGNAL_RULE "type='signal',interface='org.freedesktop.Portal.Request'"
/* Value types supported in the a{sv} option dicts sent to the desktop portal. */
typedef enum {
    DICT_TYPE_STRING,
    DICT_TYPE_UINT32,
    DICT_TYPE_BOOL,
} dict_value_type;

/* One key/value pair of a portal option dict; |value_type| selects which
   union member is valid. */
typedef struct {
    const char *key;
    dict_value_type value_type;
    union {
        char *str;
        dbus_uint32_t u32;
        dbus_bool_t boolean;
    };
} dict_entry;
/* Fills |buffer| with |buffer_size| characters drawn from |alphabet| using
   kernel randomness via getrandom(). Returns false when getrandom() fails to
   supply enough bytes. Selection uses modulo, so a very slight bias remains
   unless alphabet_size divides 256. */
static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
    /* TODO: Use other functions on other platforms than linux */
    if(getrandom(buffer, buffer_size, 0) < buffer_size) {
        fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
        return false;
    }

    for(int i = 0; i < buffer_size; ++i) {
        const unsigned char random_byte = (unsigned char)buffer[i];
        buffer[i] = alphabet[random_byte % alphabet_size];
    }

    return true;
}
/* Fills |buffer| with random alphanumeric characters ([A-Za-z0-9]). */
static bool generate_random_characters_standard_alphabet(char *buffer, int buffer_size) {
    static const char alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    return generate_random_characters(buffer, buffer_size, alphabet, sizeof(alphabet) - 1);
}
/* Human-readable name of a dict_value_type, used in error messages. */
static const char* dict_value_type_to_string(dict_value_type type) {
    const char *name = "(unknown)";
    switch(type) {
        case DICT_TYPE_STRING: name = "string";  break;
        case DICT_TYPE_UINT32: name = "uint32";  break;
        case DICT_TYPE_BOOL:   name = "boolean"; break;
    }
    return name;
}
/* Connects to the session bus and prepares portal communication: generates
 * the random string used to build unique handle/session tokens, requests a
 * well-known bus name and stores an owned copy of |screencast_restore_token|
 * (which may be NULL). Returns false, with |self| cleaned up, on failure. */
bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token) {
    memset(self, 0, sizeof(*self));
    dbus_error_init(&self->err);

    self->random_str[DBUS_RANDOM_STR_SIZE] = '\0';
    if(!generate_random_characters_standard_alphabet(self->random_str, DBUS_RANDOM_STR_SIZE)) {
        fprintf(stderr, "gsr error: gsr_dbus_init: failed to generate random string\n");
        return false;
    }

    self->con = dbus_bus_get(DBUS_BUS_SESSION, &self->err);
    if(dbus_error_is_set(&self->err)) {
        fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_get failed with error: %s\n", self->err.message);
        return false;
    }

    if(!self->con) {
        fprintf(stderr, "gsr error: gsr_dbus_init: failed to get dbus session\n");
        return false;
    }

    /* TODO: Check the name */
    const int ret = dbus_bus_request_name(self->con, "com.dec05eba.gpu_screen_recorder", DBUS_NAME_FLAG_REPLACE_EXISTING, &self->err);
    if(dbus_error_is_set(&self->err)) {
        fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_request_name failed with error: %s\n", self->err.message);
        gsr_dbus_deinit(self);
        return false;
    }

    if(screencast_restore_token) {
        /* Owned copy; freed in gsr_dbus_deinit */
        self->screencast_restore_token = strdup(screencast_restore_token);
        if(!self->screencast_restore_token) {
            fprintf(stderr, "gsr error: gsr_dbus_init: failed to clone restore token\n");
            gsr_dbus_deinit(self);
            return false;
        }
    }

    /* Not becoming the primary owner of the name is deliberately not fatal */
    (void)ret;
    // if(ret != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER) {
    //     fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: dbus_bus_request_name failed to get primary owner\n");
    //     return false;
    // }

    return true;
}
/* Releases dbus state: frees the restore token, removes the portal
 * signal-match rule, releases the requested bus name and unrefs the
 * connection. Safe to call on a partially-initialized |self|. */
void gsr_dbus_deinit(gsr_dbus *self) {
    if(self->screencast_restore_token) {
        free(self->screencast_restore_token);
        self->screencast_restore_token = NULL;
    }

    if(self->desktop_portal_rule_added) {
        dbus_bus_remove_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, NULL);
        // dbus_connection_flush(self->con);
        self->desktop_portal_rule_added = false;
    }

    if(self->con) {
        dbus_error_free(&self->err);

        dbus_bus_release_name(self->con, "com.dec05eba.gpu_screen_recorder", NULL);

        // Apparently shouldn't be used when a connection is setup by using dbus_bus_get
        //dbus_connection_close(self->con);
        dbus_connection_unref(self->con);
        self->con = NULL;
    }
}
/* Reads the uint32 property |property_name| of |interface| from the desktop
 * portal via org.freedesktop.DBus.Properties.Get, storing it in |*result|.
 * Accepts either a plain uint32 reply or a variant wrapping a uint32.
 * Blocks until the reply arrives. Returns false on failure (|*result| is 0). */
static bool gsr_dbus_desktop_portal_get_property(gsr_dbus *self, const char *interface, const char *property_name, uint32_t *result) {
    *result = 0;

    DBusMessage *msg = dbus_message_new_method_call(
        "org.freedesktop.portal.Desktop",   // target for the method call
        "/org/freedesktop/portal/desktop",  // object to call on
        "org.freedesktop.DBus.Properties",  // interface to call on
        "Get");                             // method name
    if(!msg) {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_message_new_method_call failed\n");
        return false;
    }

    DBusMessageIter it;
    dbus_message_iter_init_append(msg, &it);

    if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &interface)) {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add interface\n");
        dbus_message_unref(msg);
        return false;
    }

    if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &property_name)) {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add property_name\n");
        dbus_message_unref(msg);
        return false;
    }

    DBusPendingCall *pending = NULL;
    if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_connection_send_with_reply failed\n");
        dbus_message_unref(msg);
        return false;
    }
    dbus_connection_flush(self->con);

    dbus_message_unref(msg);
    msg = NULL;

    /* Block until the method reply is available */
    dbus_pending_call_block(pending);

    msg = dbus_pending_call_steal_reply(pending);
    if(!msg) {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_pending_call_steal_reply failed\n");
        /* Bug fix: previously dbus_message_unref(msg) was called here with a
           NULL msg, which libdbus rejects with a NULL-check warning */
        dbus_pending_call_unref(pending);
        return false;
    }

    dbus_pending_call_unref(pending);
    pending = NULL;

    DBusMessageIter resp_args;
    if(!dbus_message_iter_init(msg, &resp_args)) {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is missing arguments\n");
        dbus_message_unref(msg);
        return false;
    } else if(DBUS_TYPE_UINT32 == dbus_message_iter_get_arg_type(&resp_args)) {
        dbus_message_iter_get_basic(&resp_args, result);
    } else if(DBUS_TYPE_VARIANT == dbus_message_iter_get_arg_type(&resp_args)) {
        DBusMessageIter variant_iter;
        dbus_message_iter_recurse(&resp_args, &variant_iter);

        if(dbus_message_iter_get_arg_type(&variant_iter) == DBUS_TYPE_UINT32) {
            dbus_message_iter_get_basic(&variant_iter, result);
        } else {
            fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not a variant with an uint32, %c\n", dbus_message_iter_get_arg_type(&variant_iter));
            dbus_message_unref(msg);
            return false;
        }
    } else {
        fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not an uint32, %c\n", dbus_message_iter_get_arg_type(&resp_args));
        dbus_message_unref(msg);
        return false;
        // TODO: Check dbus_error_is_set?
    }

    dbus_message_unref(msg);
    return true;
}
/* Returns the org.freedesktop.portal.ScreenCast "version" property, fetched
   on first use and cached afterwards. Stays 0 if the query fails. */
static uint32_t gsr_dbus_get_screencast_version_cached(gsr_dbus *self) {
    if(self->screencast_version != 0)
        return self->screencast_version;

    gsr_dbus_desktop_portal_get_property(self, "org.freedesktop.portal.ScreenCast", "version", &self->screencast_version);
    return self->screencast_version;
}
/* Installs the signal-match rule for portal Request responses, at most once
   per connection. Returns false if the bus rejects the rule. */
static bool gsr_dbus_ensure_desktop_portal_rule_added(gsr_dbus *self) {
    if(self->desktop_portal_rule_added)
        return true;

    dbus_bus_add_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, &self->err);
    dbus_connection_flush(self->con);

    const bool add_failed = dbus_error_is_set(&self->err);
    if(add_failed) {
        fprintf(stderr, "gsr error: gsr_dbus_ensure_desktop_portal_rule_added: failed to add dbus rule %s, error: %s\n", DESKTOP_PORTAL_SIGNAL_RULE, self->err.message);
        return false;
    }

    self->desktop_portal_rule_added = true;
    return true;
}
/* Writes a per-request unique portal handle token into |buffer|. */
static void gsr_dbus_portal_get_unique_handle_token(gsr_dbus *self, char *buffer, int size) {
    const unsigned int request_number = self->handle_counter++;
    snprintf(buffer, size, "gpu_screen_recorder_handle_%s_%u", self->random_str, request_number);
}
/* Writes the session token (stable for this gsr_dbus instance) into |buffer|. */
static void gsr_dbus_portal_get_unique_session_token(gsr_dbus *self, char *buffer, int size) {
    snprintf(buffer, size, "gpu_screen_recorder_session_%s", self->random_str);
}
/* Appends an a{sv} dict built from |entries| to the message iterator |it|.
 * On any failure every container opened so far is abandoned so |it| remains
 * usable. Returns true on success. */
static bool dbus_add_dict(DBusMessageIter *it, const dict_entry *entries, int num_entries) {
    DBusMessageIter array_it;
    if(!dbus_message_iter_open_container(it, DBUS_TYPE_ARRAY, "{sv}", &array_it))
        return false;

    for (int i = 0; i < num_entries; ++i) {
        /* Start closed so abandon_container_if_open is safe on early failure */
        DBusMessageIter entry_it = DBUS_MESSAGE_ITER_INIT_CLOSED;
        DBusMessageIter variant_it = DBUS_MESSAGE_ITER_INIT_CLOSED;

        if(!dbus_message_iter_open_container(&array_it, DBUS_TYPE_DICT_ENTRY, NULL, &entry_it))
            goto entry_err;

        if(!dbus_message_iter_append_basic(&entry_it, DBUS_TYPE_STRING, &entries[i].key))
            goto entry_err;

        /* Value is wrapped in a variant whose signature matches value_type */
        switch (entries[i].value_type) {
            case DICT_TYPE_STRING: {
                if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_STRING_AS_STRING, &variant_it))
                    goto entry_err;

                if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_STRING, &entries[i].str))
                    goto entry_err;
                break;
            }
            case DICT_TYPE_UINT32: {
                if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_UINT32_AS_STRING, &variant_it))
                    goto entry_err;

                if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_UINT32, &entries[i].u32))
                    goto entry_err;
                break;
            }
            case DICT_TYPE_BOOL: {
                if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_BOOLEAN_AS_STRING, &variant_it))
                    goto entry_err;

                if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_BOOLEAN, &entries[i].boolean))
                    goto entry_err;
                break;
            }
        }

        dbus_message_iter_close_container(&entry_it, &variant_it);
        dbus_message_iter_close_container(&array_it, &entry_it);
        continue;

        entry_err:
        /* Unwind inner-to-outer; containers never opened are skipped */
        dbus_message_iter_abandon_container_if_open(&array_it, &variant_it);
        dbus_message_iter_abandon_container_if_open(&array_it, &entry_it);
        dbus_message_iter_abandon_container_if_open(it, &array_it);
        return false;
    }

    return dbus_message_iter_close_container(it, &array_it);
}
/* Calls |method_name| on org.freedesktop.portal.ScreenCast with an optional
 * |session_handle| (object path), optional |parent_window| (string) and an
 * a{sv} dict built from |entries|.
 * The immediate method reply may carry an object path (ignored), a unix fd
 * (stored in |*resp_fd| when requested) or an error string (fails).
 * If |response_msg| is NULL we dont wait for a response signal; otherwise this
 * blocks until the portal's org.freedesktop.portal.Request "Response" signal
 * arrives and hands it to the caller, who must dbus_message_unref it.
 * NOTE(review): the response wait loop has no timeout (see TODO below) and
 * does not verify the signal belongs to this particular request. */
static bool gsr_dbus_call_screencast_method(gsr_dbus *self, const char *method_name, const char *session_handle, const char *parent_window, const dict_entry *entries, int num_entries, int *resp_fd, DBusMessage **response_msg) {
    if(resp_fd)
        *resp_fd = -1;

    if(response_msg)
        *response_msg = NULL;

    if(!gsr_dbus_ensure_desktop_portal_rule_added(self))
        return false;

    DBusMessage *msg = dbus_message_new_method_call(
        "org.freedesktop.portal.Desktop",      // target for the method call
        "/org/freedesktop/portal/desktop",     // object to call on
        "org.freedesktop.portal.ScreenCast",   // interface to call on
        method_name);                          // method name
    if(!msg) {
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_message_new_method_call failed\n");
        return false;
    }

    DBusMessageIter it;
    dbus_message_iter_init_append(msg, &it);

    if(session_handle) {
        if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_OBJECT_PATH, &session_handle)) {
            fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add session_handle\n");
            dbus_message_unref(msg);
            return false;
        }
    }

    if(parent_window) {
        if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &parent_window)) {
            fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add parent_window\n");
            dbus_message_unref(msg);
            return false;
        }
    }

    if(!dbus_add_dict(&it, entries, num_entries)) {
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add dict\n");
        dbus_message_unref(msg);
        return false;
    }

    DBusPendingCall *pending = NULL;
    if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_connection_send_with_reply failed\n");
        dbus_message_unref(msg);
        return false;
    }
    dbus_connection_flush(self->con);

    //fprintf(stderr, "Request Sent\n");

    dbus_message_unref(msg);
    msg = NULL;

    /* Block until the method reply is available */
    dbus_pending_call_block(pending);

    msg = dbus_pending_call_steal_reply(pending);
    if(!msg) {
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_pending_call_steal_reply failed\n");
        dbus_pending_call_unref(pending);
        /* NOTE(review): msg is NULL here, so this unref is at best a no-op and
           triggers a libdbus NULL-check warning — consider removing */
        dbus_message_unref(msg);
        return false;
    }

    dbus_pending_call_unref(pending);
    pending = NULL;

    DBusMessageIter resp_args;
    if(!dbus_message_iter_init(msg, &resp_args)) {
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is missing arguments\n");
        dbus_message_unref(msg);
        return false;
    } else if (DBUS_TYPE_OBJECT_PATH == dbus_message_iter_get_arg_type(&resp_args)) {
        /* Request handle path; the actual result arrives via the Response signal */
        const char *res = NULL;
        dbus_message_iter_get_basic(&resp_args, &res);
    } else if(DBUS_TYPE_UNIX_FD == dbus_message_iter_get_arg_type(&resp_args)) {
        int fd = -1;
        dbus_message_iter_get_basic(&resp_args, &fd);

        if(resp_fd)
            *resp_fd = fd;
    } else if(DBUS_TYPE_STRING == dbus_message_iter_get_arg_type(&resp_args)) {
        char *err = NULL;
        dbus_message_iter_get_basic(&resp_args, &err);
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed with error: %s\n", err);

        dbus_message_unref(msg);
        return false;
        // TODO: Check dbus_error_is_set?
    } else {
        fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is not an object path or unix fd\n");
        dbus_message_unref(msg);
        return false;
        // TODO: Check dbus_error_is_set?
    }

    dbus_message_unref(msg);

    if(!response_msg)
        return true;

    /* TODO: Add timeout, but take into consideration user interactive signals (such as selecting a monitor to capture for ScreenCast) */
    for (;;) {
        const int timeout_milliseconds = 10;
        dbus_connection_read_write(self->con, timeout_milliseconds);
        *response_msg = dbus_connection_pop_message(self->con);

        if(!*response_msg)
            continue;

        /* Discard anything that is not a portal Request Response signal */
        if(!dbus_message_is_signal(*response_msg, "org.freedesktop.portal.Request", "Response")) {
            dbus_message_unref(*response_msg);
            *response_msg = NULL;
            continue;
        }

        break;
    }

    return true;
}
/* Reads the uint32 portal response status at the current iterator position and
   advances the iterator past it. Returns the status value, or -1 if the current
   argument is not a uint32. */
static int gsr_dbus_get_response_status(DBusMessageIter *resp_args) {
    const int arg_type = dbus_message_iter_get_arg_type(resp_args);
    if(arg_type != DBUS_TYPE_UINT32) {
        fprintf(stderr, "gsr error: gsr_dbus_get_response_status: missing uint32 in response\n");
        return -1;
    }

    dbus_uint32_t status = 0;
    dbus_message_iter_get_basic(resp_args, &status);
    dbus_message_iter_next(resp_args);
    return (int)status;
}
/* Linear search for |key| among |entries|. Returns the matching entry or NULL. */
static dict_entry* find_dict_entry_by_key(dict_entry *entries, int num_entries, const char *key) {
    dict_entry *const end = entries + num_entries;
    for(dict_entry *entry = entries; entry != end; ++entry) {
        if(strcmp(entry->key, key) == 0)
            return entry;
    }
    return NULL;
}
/* Extracts the value of the D-Bus variant at |iter| into |entry|, verifying that the
   variant's contained type matches entry->value_type. Supports string, uint32 and
   boolean variants. String values are copied with strdup into entry->str (any previous
   value is freed first) and ownership passes to the caller. Returns false on type
   mismatch or allocation failure. */
static bool gsr_dbus_get_variant_value(DBusMessageIter *iter, dict_entry *entry) {
    if(dbus_message_iter_get_arg_type(iter) != DBUS_TYPE_VARIANT) {
        fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: value is not a variant\n");
        return false;
    }
    DBusMessageIter variant_iter;
    dbus_message_iter_recurse(iter, &variant_iter);
    switch(dbus_message_iter_get_arg_type(&variant_iter)) {
        case DBUS_TYPE_STRING: {
            if(entry->value_type != DICT_TYPE_STRING) {
                fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a string\n", dict_value_type_to_string(entry->value_type));
                return false;
            }
            const char *value = NULL;
            dbus_message_iter_get_basic(&variant_iter, &value);
            if(!value) {
                fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to get entry value as value\n");
                return false;
            }
            /* Free any previously stored string so a repeated key doesn't leak */
            if(entry->str) {
                free(entry->str);
                entry->str = NULL;
            }
            entry->str = strdup(value);
            if(!entry->str) {
                fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to copy value\n");
                return false;
            }
            return true;
        }
        case DBUS_TYPE_UINT32: {
            if(entry->value_type != DICT_TYPE_UINT32) {
                fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was an uint32\n", dict_value_type_to_string(entry->value_type));
                return false;
            }
            dbus_message_iter_get_basic(&variant_iter, &entry->u32);
            return true;
        }
        case DBUS_TYPE_BOOLEAN: {
            if(entry->value_type != DICT_TYPE_BOOL) {
                fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a boolean\n", dict_value_type_to_string(entry->value_type));
                return false;
            }
            /* NOTE(review): dbus_message_iter_get_basic writes a 32-bit dbus_bool_t here;
               assumes entry->boolean is at least 32 bits wide — confirm in dict_entry's declaration. */
            dbus_message_iter_get_basic(&variant_iter, &entry->boolean);
            return true;
        }
    }
    fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: got unexpected type, expected string, uint32 or boolean\n");
    return false;
}
/*
    Parses a{sv} into matching key entries in |entries|.
    If the entry value is a string then it's allocated with malloc, is null-terminated
    and has to be freed by the caller.
    The entry values should be 0 before this function is called.
    The entries are freed if this function fails.
*/
/* Parses a D-Bus a{sv} dict at |resp_args| into the matching entries in |entries|
   (matched by key; unknown keys are skipped). String values are allocated with strdup
   and are owned by the caller; entry values should be 0 before this call.
   On failure every string entry that was already populated is freed and false is
   returned, so no partially-parsed ownership leaks to the caller. */
static bool gsr_dbus_get_map(DBusMessageIter *resp_args, dict_entry *entries, int num_entries) {
    if(dbus_message_iter_get_arg_type(resp_args) != DBUS_TYPE_ARRAY) {
        fprintf(stderr, "gsr error: gsr_dbus_get_map: missing array in response\n");
        return false;
    }
    DBusMessageIter subiter;
    dbus_message_iter_recurse(resp_args, &subiter);
    while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
        DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
        const char *key = NULL;
        dict_entry *entry = NULL;
        if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
            fprintf(stderr, "gsr error: gsr_dbus_get_map: array value is not an entry\n");
            /* Bug fix: this path used to "return false" directly, leaking strings that
               earlier loop iterations had already strdup'd into |entries|. All failure
               paths must go through the cleanup below, as the contract above promises. */
            goto error;
        }
        dbus_message_iter_recurse(&subiter, &dictiter);
        if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
            fprintf(stderr, "gsr error: gsr_dbus_get_map: entry key is not a string\n");
            goto error;
        }
        dbus_message_iter_get_basic(&dictiter, &key);
        if(!key) {
            fprintf(stderr, "gsr error: gsr_dbus_get_map: failed to get entry key as value\n");
            goto error;
        }
        entry = find_dict_entry_by_key(entries, num_entries, key);
        if(!entry) {
            /* A key the caller is not interested in; skip it */
            dbus_message_iter_next(&subiter);
            continue;
        }
        if(!dbus_message_iter_next(&dictiter)) {
            fprintf(stderr, "gsr error: gsr_dbus_get_map: missing entry value\n");
            goto error;
        }
        if(!gsr_dbus_get_variant_value(&dictiter, entry))
            goto error;
        dbus_message_iter_next(&subiter);
    }
    return true;

    error:
    /* Release everything parsed so far so the caller doesn't have to guess which
       entries were populated before the failure */
    for(int i = 0; i < num_entries; ++i) {
        if(entries[i].value_type == DICT_TYPE_STRING) {
            free(entries[i].str);
            entries[i].str = NULL;
        }
    }
    return false;
}
/* Creates a ScreenCast portal session. On success stores a malloc'd session handle
   (D-Bus object path) in |*session_handle| — owned by the caller — and returns 0.
   Returns the non-zero portal response status if the request was denied, or -1 on error. */
int gsr_dbus_screencast_create_session(gsr_dbus *self, char **session_handle) {
    assert(session_handle);
    *session_handle = NULL;

    char handle_token[64];
    char session_handle_token[64];
    gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
    gsr_dbus_portal_get_unique_session_token(self, session_handle_token, sizeof(session_handle_token));

    dict_entry args[2] = {
        { .key = "handle_token",         .value_type = DICT_TYPE_STRING, .str = handle_token },
        { .key = "session_handle_token", .value_type = DICT_TYPE_STRING, .str = session_handle_token },
    };

    DBusMessage *response_msg = NULL;
    if(!gsr_dbus_call_screencast_method(self, "CreateSession", NULL, NULL, args, 2, NULL, &response_msg)) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: failed to setup ScreenCast session. Make sure you have a desktop portal running with support for the ScreenCast interface and that the desktop portal matches the Wayland compositor you are running.\n");
        return -1;
    }

    // TODO: Verify signal path matches |res|, maybe check the below
    // DBUS_TYPE_ARRAY value?
    DBusMessageIter resp_args;
    if(!dbus_message_iter_init(response_msg, &resp_args)) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing response\n");
        dbus_message_unref(response_msg);
        return -1;
    }

    const int response_status = gsr_dbus_get_response_status(&resp_args);
    if(response_status != 0) {
        dbus_message_unref(response_msg);
        return response_status;
    }

    dict_entry entries[1] = {
        { .key = "session_handle", .value_type = DICT_TYPE_STRING, .str = NULL },
    };
    if(!gsr_dbus_get_map(&resp_args, entries, 1)) {
        dbus_message_unref(response_msg);
        return -1;
    }

    if(!entries[0].str) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing \"session_handle\" in response\n");
        dbus_message_unref(response_msg);
        return -1;
    }

    /* Ownership of the strdup'd handle transfers to the caller */
    *session_handle = entries[0].str;
    dbus_message_unref(response_msg);
    return 0;
}
/* Returns |requested_capture_types| with every known capture-type bit cleared that
   is not present in |available_capture_types|. Bits outside the known capture types
   pass through untouched. */
static uint32_t unset_unsupported_capture_types(uint32_t requested_capture_types, uint32_t available_capture_types) {
    const uint32_t known_types = GSR_PORTAL_CAPTURE_TYPE_MONITOR | GSR_PORTAL_CAPTURE_TYPE_WINDOW | GSR_PORTAL_CAPTURE_TYPE_VIRTUAL;
    return (requested_capture_types & ~known_types) | (requested_capture_types & known_types & available_capture_types);
}
/* Returns |requested_cursor_modes| with every known cursor-mode bit cleared that
   is not present in |available_cursor_modes|. Bits outside the known cursor modes
   pass through untouched. */
static uint32_t unset_unsupported_cursor_modes(uint32_t requested_cursor_modes, uint32_t available_cursor_modes) {
    const uint32_t known_modes = GSR_PORTAL_CURSOR_MODE_HIDDEN | GSR_PORTAL_CURSOR_MODE_EMBEDDED | GSR_PORTAL_CURSOR_MODE_METADATA;
    return (requested_cursor_modes & ~known_modes) | (requested_cursor_modes & known_modes & available_cursor_modes);
}
/* Configures which sources (monitor/window/virtual) and cursor mode to capture for the
   ScreenCast session. Requested capture types and cursor modes are filtered against
   what the portal advertises. With ScreenCast version >= 4 the session is made
   persistent and a previously saved restore token (if any) is passed along; if
   SelectSources fails with a restore token it is retried once without it.
   Returns 0 on success, the non-zero portal response status if the request was denied,
   or -1 on error. */
int gsr_dbus_screencast_select_sources(gsr_dbus *self, const char *session_handle, uint32_t capture_type, uint32_t cursor_mode) {
    assert(session_handle);

    uint32_t available_source_types = 0;
    gsr_dbus_desktop_portal_get_property(self, "org.freedesktop.portal.ScreenCast", "AvailableSourceTypes", &available_source_types);
    if(available_source_types == 0)
        fprintf(stderr, "gsr error: gsr_dbus_screencast_select_sources: no source types are available\n");
    capture_type = unset_unsupported_capture_types(capture_type, available_source_types);

    uint32_t available_cursor_modes = 0;
    gsr_dbus_desktop_portal_get_property(self, "org.freedesktop.portal.ScreenCast", "AvailableCursorModes", &available_cursor_modes);
    if(available_cursor_modes == 0)
        fprintf(stderr, "gsr error: gsr_dbus_screencast_select_sources: no cursor modes are available\n"); /* fixed typo: "cursors modes" */
    cursor_mode = unset_unsupported_cursor_modes(cursor_mode, available_cursor_modes);

    char handle_token[64];
    gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));

    /* args[4] and args[5] are only used when the portal supports persistence (see below) */
    int num_arg_dict = 4;
    dict_entry args[6];
    args[0].key = "types";
    args[0].value_type = DICT_TYPE_UINT32;
    args[0].u32 = capture_type;

    args[1].key = "multiple";
    args[1].value_type = DICT_TYPE_BOOL;
    args[1].boolean = false; /* TODO: Wayland ignores this and still gives the option to select multiple sources. Support that case.. */

    args[2].key = "handle_token";
    args[2].value_type = DICT_TYPE_STRING;
    args[2].str = handle_token;

    args[3].key = "cursor_mode";
    args[3].value_type = DICT_TYPE_UINT32;
    args[3].u32 = cursor_mode;

    const int screencast_server_version = gsr_dbus_get_screencast_version_cached(self);
    if(screencast_server_version >= 4) {
        num_arg_dict = 5;
        args[4].key = "persist_mode";
        args[4].value_type = DICT_TYPE_UINT32;
        args[4].u32 = 2; /* persist until explicitly revoked */

        if(self->screencast_restore_token && self->screencast_restore_token[0]) {
            num_arg_dict = 6;
            args[5].key = "restore_token";
            args[5].value_type = DICT_TYPE_STRING;
            args[5].str = self->screencast_restore_token;
        }
    } else if(self->screencast_restore_token && self->screencast_restore_token[0]) {
        fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: tried to use restore token but this option is only available in screencast version >= 4, your wayland compositors screencast version is %d\n", screencast_server_version);
    }

    DBusMessage *response_msg = NULL;
    if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg)) {
        if(num_arg_dict == 6) {
            /* We dont know what the error exactly is but assume it may be because of invalid restore token. In that case try without restore token */
            fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: SelectSources failed, retrying without restore_token\n");
            num_arg_dict = 5;
            if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg))
                return -1;
        } else {
            return -1;
        }
    }

    // TODO: Verify signal path matches |res|, maybe check the below
    //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
    DBusMessageIter resp_args;
    if(!dbus_message_iter_init(response_msg, &resp_args)) {
        /* Bug fix: this message previously named gsr_dbus_screencast_create_session */
        fprintf(stderr, "gsr error: gsr_dbus_screencast_select_sources: missing response\n");
        dbus_message_unref(response_msg);
        return -1;
    }

    const int response_status = gsr_dbus_get_response_status(&resp_args);
    if(response_status != 0) {
        dbus_message_unref(response_msg);
        return response_status;
    }

    dbus_message_unref(response_msg);
    return 0;
}
/* Reads the pipewire node id (the first uint32 field) of the ScreenCast stream struct
   (ua{sv}) that |stream_iter| points at. Returns 0 if the layout does not match. */
static dbus_uint32_t screencast_stream_get_pipewire_node(DBusMessageIter *stream_iter) {
    if(dbus_message_iter_get_arg_type(stream_iter) == DBUS_TYPE_STRUCT) {
        DBusMessageIter structiter;
        dbus_message_iter_recurse(stream_iter, &structiter);
        if(dbus_message_iter_get_arg_type(&structiter) == DBUS_TYPE_UINT32) {
            dbus_uint32_t data = 0;
            dbus_message_iter_get_basic(&structiter, &data);
            return data;
        }
    }
    return 0;
}
/* Starts the ScreenCast session. On success stores the pipewire node id of the shared
   stream in |*pipewire_node| and, if the portal provided one, saves the restore token
   in self->screencast_restore_token. Returns 0 on success, the non-zero portal response
   status if the user cancelled/denied, or -1 on error. */
int gsr_dbus_screencast_start(gsr_dbus *self, const char *session_handle, uint32_t *pipewire_node) {
    assert(session_handle);
    *pipewire_node = 0;
    char handle_token[64];
    gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
    dict_entry args[1];
    args[0].key = "handle_token";
    args[0].value_type = DICT_TYPE_STRING;
    args[0].str = handle_token;
    DBusMessage *response_msg = NULL;
    if(!gsr_dbus_call_screencast_method(self, "Start", session_handle, "", args, 1, NULL, &response_msg))
        return -1;
    // TODO: Verify signal path matches |res|, maybe check the below
    //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
    DBusMessageIter resp_args;
    if(!dbus_message_iter_init(response_msg, &resp_args)) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing response\n");
        dbus_message_unref(response_msg);
        return -1;
    }
    const int response_status = gsr_dbus_get_response_status(&resp_args);
    if(response_status != 0) {
        dbus_message_unref(response_msg);
        return response_status;
    }
    if(dbus_message_iter_get_arg_type(&resp_args) != DBUS_TYPE_ARRAY) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing array in response\n");
        dbus_message_unref(response_msg);
        return -1;
    }
    /* The results vardict may contain "restore_token" (s) and "streams" (a(ua{sv})) */
    DBusMessageIter subiter;
    dbus_message_iter_recurse(&resp_args, &subiter);
    while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
        DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
        const char *key = NULL;
        if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
            fprintf(stderr, "gsr error: gsr_dbus_screencast_start: array value is not an entry\n");
            goto error;
        }
        dbus_message_iter_recurse(&subiter, &dictiter);
        if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
            fprintf(stderr, "gsr error: gsr_dbus_screencast_start: entry key is not a string\n");
            goto error;
        }
        dbus_message_iter_get_basic(&dictiter, &key);
        if(!key) {
            fprintf(stderr, "gsr error: gsr_dbus_screencast_start: failed to get entry key as value\n");
            goto error;
        }
        if(strcmp(key, "restore_token") == 0) {
            if(!dbus_message_iter_next(&dictiter)) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing restore_token value\n");
                goto error;
            }
            if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a variant\n");
                goto error;
            }
            DBusMessageIter variant_iter;
            dbus_message_iter_recurse(&dictiter, &variant_iter);
            if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_STRING) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a string\n");
                goto error;
            }
            char *restore_token_str = NULL;
            dbus_message_iter_get_basic(&variant_iter, &restore_token_str);
            if(restore_token_str) {
                /* Replace any previously saved token */
                if(self->screencast_restore_token) {
                    free(self->screencast_restore_token);
                    self->screencast_restore_token = NULL;
                }
                self->screencast_restore_token = strdup(restore_token_str);
            }
        } else if(strcmp(key, "streams") == 0) {
            if(!dbus_message_iter_next(&dictiter)) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing streams value\n");
                goto error;
            }
            if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not a variant\n");
                goto error;
            }
            DBusMessageIter variant_iter;
            dbus_message_iter_recurse(&dictiter, &variant_iter);
            if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_ARRAY) {
                fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not an array\n");
                goto error;
            }
            const int num_streams = dbus_message_iter_get_element_count(&variant_iter);
            /* Use the last stream, since kde can return multiple streams even if only 1 is
               requested and the last one is the valid one.
               Bug fix: dbus_message_iter_recurse does not advance its parent iterator, so
               the previous code (which called the stream helper in a loop on the unchanged
               array iterator) always read the FIRST stream. Step through the array elements
               with dbus_message_iter_next to actually reach the last one. */
            if(num_streams > 0) {
                DBusMessageIter stream_iter;
                dbus_message_iter_recurse(&variant_iter, &stream_iter);
                for(int i = 0; i < num_streams - 1; ++i)
                    dbus_message_iter_next(&stream_iter);
                *pipewire_node = screencast_stream_get_pipewire_node(&stream_iter);
            }
        }
        dbus_message_iter_next(&subiter);
    }
    if(*pipewire_node == 0) {
        fprintf(stderr, "gsr error: gsr_dbus_screencast_start: no pipewire node returned\n");
        goto error;
    }
    dbus_message_unref(response_msg);
    return 0;
    error:
    dbus_message_unref(response_msg);
    return -1;
}
/* Asks the portal to open a pipewire remote for the ScreenCast session.
   On success stores the received unix fd in |*pipewire_fd| and returns true;
   |*pipewire_fd| is initialized to -1 so it stays -1 on failure.
   NOTE(review): presumably the caller owns the fd and must close() it — confirm
   against gsr_dbus_call_screencast_method's fd handling. */
bool gsr_dbus_screencast_open_pipewire_remote(gsr_dbus *self, const char *session_handle, int *pipewire_fd) {
    assert(session_handle);
    *pipewire_fd = -1;
    return gsr_dbus_call_screencast_method(self, "OpenPipeWireRemote", session_handle, NULL, NULL, 0, pipewire_fd, NULL);
}
/* Returns the restore token received from the portal during gsr_dbus_screencast_start,
   or NULL if none was received. The string is owned by |self|; do not free it. */
const char* gsr_dbus_screencast_get_restore_token(gsr_dbus *self) {
    return self->screencast_restore_token;
}

100
src/defs.c Normal file
View File

@ -0,0 +1,100 @@
#include "../include/defs.h"
#include <assert.h>
/* Returns true if |video_codec| is one of the HDR codec variants. */
bool video_codec_is_hdr(gsr_video_codec video_codec) {
    // TODO: Vulkan
    return video_codec == GSR_VIDEO_CODEC_HEVC_HDR
        || video_codec == GSR_VIDEO_CODEC_AV1_HDR;
}
/* Maps an HDR codec variant to its SDR equivalent; any other codec is returned unchanged. */
gsr_video_codec hdr_video_codec_to_sdr_video_codec(gsr_video_codec video_codec) {
    // TODO: Vulkan
    if(video_codec == GSR_VIDEO_CODEC_HEVC_HDR)
        return GSR_VIDEO_CODEC_HEVC;
    if(video_codec == GSR_VIDEO_CODEC_AV1_HDR)
        return GSR_VIDEO_CODEC_AV1;
    return video_codec;
}
/* Returns the color depth the codec encodes at: 10 bits for the HDR/10-bit variants,
   8 bits for everything else. */
gsr_color_depth video_codec_to_bit_depth(gsr_video_codec video_codec) {
    // TODO: 10-bit Vulkan
    const bool uses_10_bits =
        video_codec == GSR_VIDEO_CODEC_HEVC_HDR ||
        video_codec == GSR_VIDEO_CODEC_HEVC_10BIT ||
        video_codec == GSR_VIDEO_CODEC_AV1_HDR ||
        video_codec == GSR_VIDEO_CODEC_AV1_10BIT;
    return uses_10_bits ? GSR_COLOR_DEPTH_10_BITS : GSR_COLOR_DEPTH_8_BITS;
}
/* Returns the canonical name of |video_codec|, or "" for values without a name
   (e.g. GSR_VIDEO_CODEC_AUTO). */
const char* video_codec_to_string(gsr_video_codec video_codec) {
    const char *name = "";
    switch(video_codec) {
        case GSR_VIDEO_CODEC_H264:        name = "h264";        break;
        case GSR_VIDEO_CODEC_HEVC:        name = "hevc";        break;
        case GSR_VIDEO_CODEC_HEVC_HDR:    name = "hevc_hdr";    break;
        case GSR_VIDEO_CODEC_HEVC_10BIT:  name = "hevc_10bit";  break;
        case GSR_VIDEO_CODEC_AV1:         name = "av1";         break;
        case GSR_VIDEO_CODEC_AV1_HDR:     name = "av1_hdr";     break;
        case GSR_VIDEO_CODEC_AV1_10BIT:   name = "av1_10bit";   break;
        case GSR_VIDEO_CODEC_VP8:         name = "vp8";         break;
        case GSR_VIDEO_CODEC_VP9:         name = "vp9";         break;
        case GSR_VIDEO_CODEC_H264_VULKAN: name = "h264_vulkan"; break;
        case GSR_VIDEO_CODEC_HEVC_VULKAN: name = "hevc_vulkan"; break;
    }
    return name;
}
// bool video_codec_is_hevc(gsr_video_codec video_codec) {
// // TODO: 10-bit vulkan
// switch(video_codec) {
// case GSR_VIDEO_CODEC_HEVC:
// case GSR_VIDEO_CODEC_HEVC_HDR:
// case GSR_VIDEO_CODEC_HEVC_10BIT:
// case GSR_VIDEO_CODEC_HEVC_VULKAN:
// return true;
// default:
// return false;
// }
// }
/* Returns true if |video_codec| is any AV1 variant (SDR, HDR or 10-bit). */
bool video_codec_is_av1(gsr_video_codec video_codec) {
    // TODO: Vulkan
    return video_codec == GSR_VIDEO_CODEC_AV1
        || video_codec == GSR_VIDEO_CODEC_AV1_HDR
        || video_codec == GSR_VIDEO_CODEC_AV1_10BIT;
}
/* Returns true if |video_codec| uses the Vulkan video encode path. */
bool video_codec_is_vulkan(gsr_video_codec video_codec) {
    return video_codec == GSR_VIDEO_CODEC_H264_VULKAN
        || video_codec == GSR_VIDEO_CODEC_HEVC_VULKAN;
}
/* Returns the encoder name for |audio_codec|. Asserts (debug builds) on unknown values
   and returns "" in release builds. */
const char* audio_codec_get_name(gsr_audio_codec audio_codec) {
    if(audio_codec == GSR_AUDIO_CODEC_AAC)
        return "aac";
    if(audio_codec == GSR_AUDIO_CODEC_OPUS)
        return "opus";
    if(audio_codec == GSR_AUDIO_CODEC_FLAC)
        return "flac";
    assert(false);
    return "";
}

526
src/egl.c Normal file
View File

@ -0,0 +1,526 @@
#include "../include/egl.h"
#include "../include/window/window.h"
#include "../include/library_loader.h"
#include "../include/utils.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <assert.h>
#include <unistd.h>
// TODO: rename gsr_egl to something else since this includes both egl and glx and in the future maybe vulkan too
/* Minimal subset of GLX constants, normally provided by <GL/glx.h>, defined locally so
   building does not require the GLX development headers.
   NOTE(review): values are assumed to match the official GLX headers — verify against
   <GL/glx.h> before adding new ones. */
#define GLX_DRAWABLE_TYPE 0x8010
#define GLX_RENDER_TYPE 0x8011
#define GLX_RGBA_BIT 0x00000001
#define GLX_WINDOW_BIT 0x00000001
#define GLX_PIXMAP_BIT 0x00000002
#define GLX_BIND_TO_TEXTURE_RGBA_EXT 0x20D1
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
#define GLX_DOUBLEBUFFER 5
#define GLX_RED_SIZE 8
#define GLX_GREEN_SIZE 9
#define GLX_BLUE_SIZE 10
#define GLX_ALPHA_SIZE 11
#define GLX_DEPTH_SIZE 12
#define GLX_RGBA_TYPE 0x8014
// TODO: Create egl context without surface (in other words, x11/wayland agnostic, doesn't require x11/wayland dependency)
/* Creates an EGL display, an OpenGL ES 3 context and a window surface for |self->window|,
   then makes the context current. When |enable_debug| is set the context is created with
   EGL_CONTEXT_OPENGL_DEBUG. On failure all partially-created EGL state is released via
   gsr_egl_unload and false is returned. */
static bool gsr_egl_create_window(gsr_egl *self, bool enable_debug) {
    EGLConfig ecfg;
    int32_t num_config = 0;
    /* Request a 24-bit color buffer with OpenGL ES 3 support */
    const int32_t attr[] = {
        EGL_BUFFER_SIZE, 24,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT,
        EGL_NONE, EGL_NONE
    };
    /* The trailing EGL_NONE slots are overwritten below when debugging is enabled */
    int32_t ctxattr[] = {
        EGL_CONTEXT_CLIENT_VERSION, 3,
        EGL_NONE, EGL_NONE, EGL_NONE
    };
    if(enable_debug) {
        ctxattr[2] = EGL_CONTEXT_OPENGL_DEBUG;
        ctxattr[3] = EGL_TRUE;
    }
    self->eglBindAPI(EGL_OPENGL_ES_API);
    self->egl_display = self->eglGetDisplay((EGLNativeDisplayType)gsr_window_get_display(self->window));
    if(!self->egl_display) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglGetDisplay failed\n");
        goto fail;
    }
    if(!self->eglInitialize(self->egl_display, NULL, NULL)) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglInitialize failed\n");
        goto fail;
    }
    /* Exactly one matching config is requested; anything else is treated as failure */
    if(!self->eglChooseConfig(self->egl_display, attr, &ecfg, 1, &num_config) || num_config != 1) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to find a matching config\n");
        goto fail;
    }
    self->egl_context = self->eglCreateContext(self->egl_display, ecfg, NULL, ctxattr);
    if(!self->egl_context) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to create egl context\n");
        goto fail;
    }
    self->egl_surface = self->eglCreateWindowSurface(self->egl_display, ecfg, (EGLNativeWindowType)gsr_window_get_window(self->window), NULL);
    if(!self->egl_surface) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to create window surface\n");
        goto fail;
    }
    if(!self->eglMakeCurrent(self->egl_display, self->egl_surface, self->egl_surface, self->egl_context)) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to make egl context current\n");
        goto fail;
    }
    return true;
    fail:
    /* gsr_egl_unload tears down whatever subset of display/context/surface was created */
    gsr_egl_unload(self);
    return false;
}
/* Chooses a GLX framebuffer config (RGB888, double-buffered, window-renderable).
   Returns the first matching config or NULL if none matches. */
static GLXFBConfig glx_fb_config_choose(gsr_egl *self, Display *display) {
    // TODO: OpenGL debug context?
    const int glx_visual_attribs[] = {
        GLX_RENDER_TYPE, GLX_RGBA_BIT,
        GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
        // TODO:
        //GLX_BIND_TO_TEXTURE_RGBA_EXT, 1,
        //GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT,
        GLX_DOUBLEBUFFER, True,
        GLX_RED_SIZE, 8,
        GLX_GREEN_SIZE, 8,
        GLX_BLUE_SIZE, 8,
        GLX_ALPHA_SIZE, 0,
        GLX_DEPTH_SIZE, 0,
        None, None
    };

    int num_fb_configs = 0;
    GLXFBConfig *fb_configs = self->glXChooseFBConfig(display, DefaultScreen(display), glx_visual_attribs, &num_fb_configs);
    if(!fb_configs)
        return NULL;

    /* glXChooseFBConfig allocates the returned array and it must be released with XFree
       (the previous implementation leaked it). The individual GLXFBConfig handles remain
       valid after the array is freed. */
    GLXFBConfig fb_config = num_fb_configs > 0 ? fb_configs[0] : NULL;
    XFree(fb_configs);
    return fb_config;
}
/* Tears down the current EGL display/context/surface and replaces it with a GLX context
   made current on the same X11 window. Only valid on X11 (asserted).
   On failure the partially-created GLX state is destroyed and false is returned.
   NOTE(review): the error messages below say "gsr_egl_create_window failed" — presumably
   because this runs as a fallback during window creation; confirm before renaming them. */
static bool gsr_egl_switch_to_glx_context(gsr_egl *self) {
    // TODO: Cleanup
    assert(gsr_window_get_display_server(self->window) == GSR_DISPLAY_SERVER_X11);
    Display *display = gsr_window_get_display(self->window);
    const Window window = (Window)gsr_window_get_window(self->window);
    /* Release EGL state first: context, then surface, then the display itself */
    if(self->egl_context) {
        self->eglMakeCurrent(self->egl_display, NULL, NULL, NULL);
        self->eglDestroyContext(self->egl_display, self->egl_context);
        self->egl_context = NULL;
    }
    if(self->egl_surface) {
        self->eglDestroySurface(self->egl_display, self->egl_surface);
        self->egl_surface = NULL;
    }
    if(self->egl_display) {
        self->eglTerminate(self->egl_display);
        self->egl_display = NULL;
    }
    self->glx_fb_config = glx_fb_config_choose(self, display);
    if(!self->glx_fb_config) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to find a suitable fb config\n");
        goto fail;
    }
    // TODO:
    //self->glx_context = self->glXCreateContextAttribsARB(display, self->glx_fb_config, NULL, True, context_attrib_list);
    self->glx_context = self->glXCreateNewContext(display, self->glx_fb_config, GLX_RGBA_TYPE, NULL, True);
    if(!self->glx_context) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to create glx context\n");
        goto fail;
    }
    if(!self->glXMakeContextCurrent(display, window, window, self->glx_context)) {
        fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to make glx context current\n");
        goto fail;
    }
    return true;
    fail:
    if(self->glx_context) {
        self->glXMakeContextCurrent(display, None, None, NULL);
        self->glXDestroyContext(display, self->glx_context);
        self->glx_context = NULL;
        self->glx_fb_config = NULL;
    }
    return false;
}
/* Resolves the core EGL entry points from |library| (libEGL.so.1) into |self|.
   All symbols listed here are required; returns false if any is missing. */
static bool gsr_egl_load_egl(gsr_egl *self, void *library) {
    const dlsym_assign required_dlsym[] = {
        { (void**)&self->eglGetError, "eglGetError" },
        { (void**)&self->eglGetDisplay, "eglGetDisplay" },
        { (void**)&self->eglInitialize, "eglInitialize" },
        { (void**)&self->eglTerminate, "eglTerminate" },
        { (void**)&self->eglChooseConfig, "eglChooseConfig" },
        { (void**)&self->eglCreateWindowSurface, "eglCreateWindowSurface" },
        { (void**)&self->eglCreateContext, "eglCreateContext" },
        { (void**)&self->eglMakeCurrent, "eglMakeCurrent" },
        { (void**)&self->eglCreateImage, "eglCreateImage" },
        { (void**)&self->eglDestroyContext, "eglDestroyContext" },
        { (void**)&self->eglDestroySurface, "eglDestroySurface" },
        { (void**)&self->eglDestroyImage, "eglDestroyImage" },
        { (void**)&self->eglSwapInterval, "eglSwapInterval" },
        { (void**)&self->eglSwapBuffers, "eglSwapBuffers" },
        { (void**)&self->eglBindAPI, "eglBindAPI" },
        { (void**)&self->eglGetProcAddress, "eglGetProcAddress" },
        { NULL, NULL }
    };
    if(!dlsym_load_list(library, required_dlsym)) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: missing required symbols in libEGL.so.1\n");
        return false;
    }
    return true;
}
/* Resolves EGL/GL extension entry points via eglGetProcAddress.
   Only the DMA-BUF export functions and glEGLImageTargetTexture2DOES are mandatory;
   the remaining pointers may be NULL and must be checked by callers before use. */
static bool gsr_egl_proc_load_egl(gsr_egl *self) {
    self->eglExportDMABUFImageQueryMESA = (FUNC_eglExportDMABUFImageQueryMESA)self->eglGetProcAddress("eglExportDMABUFImageQueryMESA");
    self->eglExportDMABUFImageMESA = (FUNC_eglExportDMABUFImageMESA)self->eglGetProcAddress("eglExportDMABUFImageMESA");
    self->glEGLImageTargetTexture2DOES = (FUNC_glEGLImageTargetTexture2DOES)self->eglGetProcAddress("glEGLImageTargetTexture2DOES");
    self->eglQueryDisplayAttribEXT = (FUNC_eglQueryDisplayAttribEXT)self->eglGetProcAddress("eglQueryDisplayAttribEXT");
    self->eglQueryDeviceStringEXT = (FUNC_eglQueryDeviceStringEXT)self->eglGetProcAddress("eglQueryDeviceStringEXT");
    self->eglQueryDmaBufModifiersEXT = (FUNC_eglQueryDmaBufModifiersEXT)self->eglGetProcAddress("eglQueryDmaBufModifiersEXT");
    self->glCreateMemoryObjectsEXT = (FUNC_glCreateMemoryObjectsEXT)self->eglGetProcAddress("glCreateMemoryObjectsEXT");
    self->glImportMemoryFdEXT = (FUNC_glImportMemoryFdEXT)self->eglGetProcAddress("glImportMemoryFdEXT");
    self->glIsMemoryObjectEXT = (FUNC_glIsMemoryObjectEXT)self->eglGetProcAddress("glIsMemoryObjectEXT");
    self->glTexStorageMem2DEXT = (FUNC_glTexStorageMem2DEXT)self->eglGetProcAddress("glTexStorageMem2DEXT");
    self->glBufferStorageMemEXT = (FUNC_glBufferStorageMemEXT)self->eglGetProcAddress("glBufferStorageMemEXT");
    self->glNamedBufferStorageMemEXT = (FUNC_glNamedBufferStorageMemEXT)self->eglGetProcAddress("glNamedBufferStorageMemEXT");
    self->glMemoryObjectParameterivEXT = (FUNC_glMemoryObjectParameterivEXT)self->eglGetProcAddress("glMemoryObjectParameterivEXT");
    if(!self->eglExportDMABUFImageQueryMESA) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageQueryMESA\n");
        return false;
    }
    if(!self->eglExportDMABUFImageMESA) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageMESA\n");
        return false;
    }
    if(!self->glEGLImageTargetTexture2DOES) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: could not find glEGLImageTargetTexture2DOES\n");
        return false;
    }
    return true;
}
/* Resolves the GLX entry points from |library| (libGLX.so.0) into |self|.
   The dlsym table entries and glXCreateContextAttribsARB are required; the three
   swap-interval extension pointers are optional and may remain NULL. */
static bool gsr_egl_load_glx(gsr_egl *self, void *library) {
    const dlsym_assign required_dlsym[] = {
        { (void**)&self->glXGetProcAddress, "glXGetProcAddress" },
        { (void**)&self->glXChooseFBConfig, "glXChooseFBConfig" },
        { (void**)&self->glXMakeContextCurrent, "glXMakeContextCurrent" },
        { (void**)&self->glXCreateNewContext, "glXCreateNewContext" },
        { (void**)&self->glXDestroyContext, "glXDestroyContext" },
        { (void**)&self->glXSwapBuffers, "glXSwapBuffers" },
        { NULL, NULL }
    };
    if(!dlsym_load_list(library, required_dlsym)) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: missing required symbols in libGLX.so.0\n");
        return false;
    }
    self->glXCreateContextAttribsARB = (FUNC_glXCreateContextAttribsARB)self->glXGetProcAddress((const unsigned char*)"glXCreateContextAttribsARB");
    if(!self->glXCreateContextAttribsARB) {
        fprintf(stderr, "gsr error: gsr_egl_load_glx failed: could not find glXCreateContextAttribsARB\n");
        return false;
    }
    /* Optional vsync-control extensions; set_vertical_sync_enabled checks each for NULL */
    self->glXSwapIntervalEXT = (FUNC_glXSwapIntervalEXT)self->glXGetProcAddress((const unsigned char*)"glXSwapIntervalEXT");
    self->glXSwapIntervalMESA = (FUNC_glXSwapIntervalMESA)self->glXGetProcAddress((const unsigned char*)"glXSwapIntervalMESA");
    self->glXSwapIntervalSGI = (FUNC_glXSwapIntervalSGI)self->glXGetProcAddress((const unsigned char*)"glXSwapIntervalSGI");
    return true;
}
/* Resolves all OpenGL (ES) functions used by gsr from |library| (libGL.so.1) into
   |self|. Every symbol in the table is required; returns false if any is missing. */
static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
    const dlsym_assign required_dlsym[] = {
        { (void**)&self->glGetError, "glGetError" },
        { (void**)&self->glGetString, "glGetString" },
        { (void**)&self->glFlush, "glFlush" },
        { (void**)&self->glFinish, "glFinish" },
        { (void**)&self->glClear, "glClear" },
        { (void**)&self->glClearColor, "glClearColor" },
        { (void**)&self->glGenTextures, "glGenTextures" },
        { (void**)&self->glDeleteTextures, "glDeleteTextures" },
        { (void**)&self->glActiveTexture, "glActiveTexture" },
        { (void**)&self->glBindTexture, "glBindTexture" },
        { (void**)&self->glBindImageTexture, "glBindImageTexture" },
        { (void**)&self->glTexParameteri, "glTexParameteri" },
        { (void**)&self->glTexParameteriv, "glTexParameteriv" },
        { (void**)&self->glTexParameterfv, "glTexParameterfv" },
        { (void**)&self->glTexImage2D, "glTexImage2D" },
        { (void**)&self->glTexSubImage2D, "glTexSubImage2D" },
        { (void**)&self->glTexStorage2D, "glTexStorage2D" },
        { (void**)&self->glGetTexImage, "glGetTexImage" },
        { (void**)&self->glGenFramebuffers, "glGenFramebuffers" },
        { (void**)&self->glBindFramebuffer, "glBindFramebuffer" },
        { (void**)&self->glDeleteFramebuffers, "glDeleteFramebuffers" },
        { (void**)&self->glMemoryBarrier, "glMemoryBarrier" },
        { (void**)&self->glViewport, "glViewport" },
        { (void**)&self->glFramebufferTexture2D, "glFramebufferTexture2D" },
        { (void**)&self->glDrawBuffers, "glDrawBuffers" },
        { (void**)&self->glCheckFramebufferStatus, "glCheckFramebufferStatus" },
        { (void**)&self->glBindBuffer, "glBindBuffer" },
        { (void**)&self->glGenBuffers, "glGenBuffers" },
        { (void**)&self->glBufferData, "glBufferData" },
        { (void**)&self->glBufferSubData, "glBufferSubData" },
        { (void**)&self->glDeleteBuffers, "glDeleteBuffers" },
        { (void**)&self->glGenVertexArrays, "glGenVertexArrays" },
        { (void**)&self->glBindVertexArray, "glBindVertexArray" },
        { (void**)&self->glDeleteVertexArrays, "glDeleteVertexArrays" },
        { (void**)&self->glCreateProgram, "glCreateProgram" },
        { (void**)&self->glCreateShader, "glCreateShader" },
        { (void**)&self->glAttachShader, "glAttachShader" },
        { (void**)&self->glBindAttribLocation, "glBindAttribLocation" },
        { (void**)&self->glCompileShader, "glCompileShader" },
        { (void**)&self->glLinkProgram, "glLinkProgram" },
        { (void**)&self->glShaderSource, "glShaderSource" },
        { (void**)&self->glUseProgram, "glUseProgram" },
        { (void**)&self->glGetProgramInfoLog, "glGetProgramInfoLog" },
        { (void**)&self->glGetShaderiv, "glGetShaderiv" },
        { (void**)&self->glGetShaderInfoLog, "glGetShaderInfoLog" },
        { (void**)&self->glDeleteProgram, "glDeleteProgram" },
        { (void**)&self->glDeleteShader, "glDeleteShader" },
        { (void**)&self->glGetProgramiv, "glGetProgramiv" },
        { (void**)&self->glVertexAttribPointer, "glVertexAttribPointer" },
        { (void**)&self->glEnableVertexAttribArray, "glEnableVertexAttribArray" },
        { (void**)&self->glDrawArrays, "glDrawArrays" },
        { (void**)&self->glEnable, "glEnable" },
        { (void**)&self->glDisable, "glDisable" },
        { (void**)&self->glBlendFunc, "glBlendFunc" },
        { (void**)&self->glPixelStorei, "glPixelStorei" },
        { (void**)&self->glGetUniformLocation, "glGetUniformLocation" },
        { (void**)&self->glUniform1f, "glUniform1f" },
        { (void**)&self->glUniform2f, "glUniform2f" },
        { (void**)&self->glUniform1i, "glUniform1i" },
        { (void**)&self->glUniform2i, "glUniform2i" },
        { (void**)&self->glUniformMatrix2fv, "glUniformMatrix2fv" },
        { (void**)&self->glDebugMessageCallback, "glDebugMessageCallback" },
        { (void**)&self->glScissor, "glScissor" },
        { (void**)&self->glReadPixels, "glReadPixels" },
        { (void**)&self->glMapBufferRange, "glMapBufferRange" },
        { (void**)&self->glUnmapBuffer, "glUnmapBuffer" },
        { (void**)&self->glGetIntegerv, "glGetIntegerv" },
        { NULL, NULL }
    };
    if(!dlsym_load_list(library, required_dlsym)) {
        fprintf(stderr, "gsr error: gsr_egl_load failed: missing required symbols in libGL.so.1\n");
        return false;
    }
    return true;
}
#define GL_DEBUG_TYPE_ERROR 0x824C
#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B

/* OpenGL debug-message callback: logs every message except notifications,
   marking genuine GL errors with a "** GL ERROR **" prefix. */
static void debug_callback(unsigned int source, unsigned int type, unsigned int id, unsigned int severity, int length, const char* message, const void* userParam) {
    (void)source;
    (void)id;
    (void)length;
    (void)userParam;
    // Notifications are too noisy to be worth logging.
    if(severity == GL_DEBUG_SEVERITY_NOTIFICATION)
        return;

    const char *error_prefix = (type == GL_DEBUG_TYPE_ERROR) ? "** GL ERROR **" : "";
    fprintf(stderr, "gsr info: gl callback: %s type = 0x%x, severity = 0x%x, message = %s\n", error_prefix, type, severity, message);
}
/* TODO: check for glx swap control extension string (GLX_EXT_swap_control, etc) */
/* Sets the GLX swap interval using whichever swap-control entry point was
   resolved (EXT, then MESA, then SGI). Warns once if none is available. */
static void set_vertical_sync_enabled(gsr_egl *egl, int enabled) {
    const int interval = enabled ? 1 : 0;
    int status = 0;

    if(egl->glXSwapIntervalEXT) {
        // The EXT variant takes a drawable and is X11-only.
        assert(gsr_window_get_display_server(egl->window) == GSR_DISPLAY_SERVER_X11);
        Display *display = gsr_window_get_display(egl->window);
        const Window window = (Window)gsr_window_get_window(egl->window);
        egl->glXSwapIntervalEXT(display, window, interval);
    } else if(egl->glXSwapIntervalMESA) {
        status = egl->glXSwapIntervalMESA(interval);
    } else if(egl->glXSwapIntervalSGI) {
        status = egl->glXSwapIntervalSGI(interval);
    } else {
        static int warned = 0;
        if(!warned) {
            warned = 1;
            fprintf(stderr, "gsr warning: setting vertical sync not supported\n");
        }
    }

    if(status != 0)
        fprintf(stderr, "gsr warning: setting vertical sync failed\n");
}
/* Turns off vsync for the active context (EGL or GLX). */
static void gsr_egl_disable_vsync(gsr_egl *self) {
    switch(self->context_type) {
        case GSR_GL_CONTEXT_TYPE_EGL:
            self->eglSwapInterval(self->egl_display, 0);
            break;
        case GSR_GL_CONTEXT_TYPE_GLX:
            set_vertical_sync_enabled(self, 0);
            break;
    }
}
/* Loads the EGL/GLX/GL libraries with dlopen, creates a GL context for |window|
   and queries GPU info. The context starts as EGL and is switched to GLX for
   NVIDIA monitor capture on X11 (NvFBC requires GLX). Returns false on
   failure, in which case everything already loaded is unloaded again. */
bool gsr_egl_load(gsr_egl *self, gsr_window *window, bool is_monitor_capture, bool enable_debug) {
    memset(self, 0, sizeof(gsr_egl));
    self->context_type = GSR_GL_CONTEXT_TYPE_EGL;
    self->window = window;

    dlerror(); /* clear */
    self->egl_library = dlopen("libEGL.so.1", RTLD_LAZY);
    if(!self->egl_library) {
        fprintf(stderr, "gsr error: gsr_egl_load: failed to load libEGL.so.1, error: %s\n", dlerror());
        goto fail;
    }

    // libGLX is optional (see the fallback below); libGL is required.
    self->glx_library = dlopen("libGLX.so.0", RTLD_LAZY);
    self->gl_library = dlopen("libGL.so.1", RTLD_LAZY);
    if(!self->gl_library) {
        fprintf(stderr, "gsr error: gsr_egl_load: failed to load libGL.so.1, error: %s\n", dlerror());
        goto fail;
    }

    if(!gsr_egl_load_egl(self, self->egl_library))
        goto fail;

    /* In some distros (alpine for example libGLX doesn't exist, but libGL can be used instead) */
    if(!gsr_egl_load_glx(self, self->glx_library ? self->glx_library : self->gl_library))
        goto fail;

    if(!gsr_egl_load_gl(self, self->gl_library))
        goto fail;

    if(!gsr_egl_proc_load_egl(self))
        goto fail;

    if(!gsr_egl_create_window(self, enable_debug))
        goto fail;

    if(!gl_get_gpu_info(self, &self->gpu_info))
        goto fail;

    // Query the DRM device file backing the EGL display, when the
    // EGL_EXT_device_query extensions are available.
    if(self->eglQueryDisplayAttribEXT && self->eglQueryDeviceStringEXT) {
        intptr_t device = 0;
        if(self->eglQueryDisplayAttribEXT(self->egl_display, EGL_DEVICE_EXT, &device) && device)
            self->dri_card_path = self->eglQueryDeviceStringEXT((void*)device, EGL_DRM_DEVICE_FILE_EXT);
    }

    /* Nvfbc requires glx */
    if(gsr_window_get_display_server(self->window) == GSR_DISPLAY_SERVER_X11 && is_monitor_capture && self->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA) {
        self->context_type = GSR_GL_CONTEXT_TYPE_GLX;
        // The DRM path was queried from the EGL display, which is torn down here.
        self->dri_card_path = NULL;
        if(!gsr_egl_switch_to_glx_context(self))
            goto fail;
    }

    if(enable_debug) {
        self->glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
        self->glDebugMessageCallback(debug_callback, NULL);
    }

    self->glEnable(GL_BLEND);
    self->glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    // Byte-tight row packing for pixel transfers (no row padding).
    self->glPixelStorei(GL_PACK_ALIGNMENT, 1);
    self->glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

    gsr_egl_disable_vsync(self);

    if(self->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA) {
        /* This fixes nvenc codecs unable to load on openSUSE tumbleweed because of a cuda error. Don't ask me why */
        const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
        if(inside_flatpak)
            system("flatpak-spawn --host -- sh -c 'grep -q openSUSE /etc/os-release && nvidia-smi -f /dev/null'");
        else
            system("sh -c 'grep -q openSUSE /etc/os-release && nvidia-smi -f /dev/null'");
    }

    return true;

    fail:
    gsr_egl_unload(self);
    return false;
}
/* Destroys the GL context(s) and closes the loaded libraries. Safe to call on
   a partially initialized gsr_egl — it is the failure path of gsr_egl_load. */
void gsr_egl_unload(gsr_egl *self) {
    // EGL teardown: release the context from this thread before destroying it.
    if(self->egl_context) {
        self->eglMakeCurrent(self->egl_display, NULL, NULL, NULL);
        self->eglDestroyContext(self->egl_display, self->egl_context);
        self->egl_context = NULL;
    }

    if(self->egl_surface) {
        self->eglDestroySurface(self->egl_display, self->egl_surface);
        self->egl_surface = NULL;
    }

    if(self->egl_display) {
        self->eglTerminate(self->egl_display);
        self->egl_display = NULL;
    }

    // GLX teardown (only set when the context was switched to GLX, X11-only).
    if(self->glx_context) {
        assert(gsr_window_get_display_server(self->window) == GSR_DISPLAY_SERVER_X11);
        Display *display = gsr_window_get_display(self->window);
        self->glXMakeContextCurrent(display, None, None, NULL);
        self->glXDestroyContext(display, self->glx_context);
        self->glx_context = NULL;
        self->glx_fb_config = NULL;
    }

    // Close libraries last, after all calls into them are done.
    if(self->egl_library) {
        dlclose(self->egl_library);
        self->egl_library = NULL;
    }

    if(self->glx_library) {
        dlclose(self->glx_library);
        self->glx_library = NULL;
    }

    if(self->gl_library) {
        dlclose(self->gl_library);
        self->gl_library = NULL;
    }

    memset(self, 0, sizeof(gsr_egl));
}
/* NOTE(review): despite the name, no buffer swap happens here — only a flush
   plus a full memory barrier. Presumably rendering targets offscreen textures
   consumed by the encoders, so a swap is unnecessary — confirm. */
void gsr_egl_swap_buffers(gsr_egl *self) {
    self->glFlush();
    // TODO: Use the minimal barrier required
    self->glMemoryBarrier(GL_ALL_BARRIER_BITS); // GL_SHADER_IMAGE_ACCESS_BARRIER_BIT
}

155
src/encoder/encoder.c Normal file
View File

@ -0,0 +1,155 @@
#include "../../include/encoder/encoder.h"
#include "../../include/utils.h"
#include <string.h>
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
/* Initializes the encoder state: creates the muxer write mutex and, when
   |replay_buffer_num_packets| > 0, a replay buffer. Returns false on failure
   (state is cleaned up). */
bool gsr_encoder_init(gsr_encoder *self, gsr_replay_storage replay_storage, size_t replay_buffer_num_packets, double replay_buffer_time, const char *replay_directory) {
    memset(self, 0, sizeof(*self));
    self->num_recording_destinations = 0;
    self->recording_destination_id_counter = 0;

    if(pthread_mutex_init(&self->file_write_mutex, NULL) != 0) {
        fprintf(stderr, "gsr error: gsr_encoder_init: failed to create mutex\n");
        return false;
    }
    self->mutex_created = true;

    const bool replay_enabled = replay_buffer_num_packets > 0;
    if(replay_enabled) {
        self->replay_buffer = gsr_replay_buffer_create(replay_storage, replay_directory, replay_buffer_time, replay_buffer_num_packets);
        if(!self->replay_buffer) {
            fprintf(stderr, "gsr error: gsr_encoder_init: failed to create replay buffer\n");
            gsr_encoder_deinit(self);
            return false;
        }
    }

    return true;
}
/* Releases everything created by gsr_encoder_init. Safe to call after a
   partially failed init (each resource is guarded by its own flag/pointer). */
void gsr_encoder_deinit(gsr_encoder *self) {
    if(self->mutex_created) {
        self->mutex_created = false;
        pthread_mutex_destroy(&self->file_write_mutex);
    }

    if(self->replay_buffer) {
        gsr_replay_buffer_destroy(self->replay_buffer);
        self->replay_buffer = NULL;
    }

    self->num_recording_destinations = 0;
    self->recording_destination_id_counter = 0;
}
/* Drains all pending encoded packets from |codec_context| and fans each one
   out to the replay buffer (if any) and to every registered recording
   destination whose codec context matches. |pts| is assigned to both pts and
   dts of each packet before distribution. */
void gsr_encoder_receive_packets(gsr_encoder *self, AVCodecContext *codec_context, int64_t pts, int stream_index) {
    for(;;) {
        AVPacket *av_packet = av_packet_alloc();
        if(!av_packet)
            break;

        av_packet->data = NULL;
        av_packet->size = 0;
        int res = avcodec_receive_packet(codec_context, av_packet);
        if(res == 0) { // we have a packet, send the packet to the muxer
            av_packet->stream_index = stream_index;
            av_packet->pts = pts;
            av_packet->dts = pts;

            // The replay buffer gets the packet with the original timestamps,
            // before any per-destination rebasing below.
            if(self->replay_buffer) {
                const double time_now = clock_get_monotonic_seconds();
                if(!gsr_replay_buffer_append(self->replay_buffer, av_packet, time_now))
                    fprintf(stderr, "gsr error: gsr_encoder_receive_packets: failed to add replay buffer data\n");
            }

            // The destination list is shared with add/remove, which run under
            // the same mutex.
            pthread_mutex_lock(&self->file_write_mutex);
            const bool is_keyframe = av_packet->flags & AV_PKT_FLAG_KEY;
            for(size_t i = 0; i < self->num_recording_destinations; ++i) {
                gsr_encoder_recording_destination *recording_destination = &self->recording_destinations[i];
                if(recording_destination->codec_context != codec_context)
                    continue;

                // Don't write to a destination until it has seen a keyframe,
                // otherwise the start of its output is undecodable.
                if(is_keyframe)
                    recording_destination->has_received_keyframe = true;
                else if(!recording_destination->has_received_keyframe)
                    continue;

                // Rebase timestamps so each destination starts at 0 (computed
                // from |pts| each iteration, so prior rescaling doesn't leak
                // between destinations), then convert to the stream time base.
                av_packet->pts = pts - recording_destination->start_pts;
                av_packet->dts = pts - recording_destination->start_pts;

                av_packet_rescale_ts(av_packet, codec_context->time_base, recording_destination->stream->time_base);
                // TODO: Is av_interleaved_write_frame needed?. Answer: might be needed for mkv but dont use it! it causes frames to be inconsistent, skipping frames and duplicating frames.
                // TODO: av_interleaved_write_frame might be needed for cfr, or always for flv
                const int ret = av_write_frame(recording_destination->format_context, av_packet);
                if(ret < 0) {
                    char error_buffer[AV_ERROR_MAX_STRING_SIZE];
                    if(av_strerror(ret, error_buffer, sizeof(error_buffer)) < 0)
                        snprintf(error_buffer, sizeof(error_buffer), "Unknown error");
                    fprintf(stderr, "gsr error: gsr_encoder_receive_packets: failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet->stream_index, error_buffer, ret);
                }
            }
            pthread_mutex_unlock(&self->file_write_mutex);

            av_packet_free(&av_packet);
        } else if (res == AVERROR(EAGAIN)) { // we have no packet
            // fprintf(stderr, "No packet!\n");
            av_packet_free(&av_packet);
            break;
        } else if (res == AVERROR_EOF) { // this is the end of the stream
            av_packet_free(&av_packet);
            fprintf(stderr, "End of stream!\n");
            break;
        } else {
            av_packet_free(&av_packet);
            fprintf(stderr, "Unexpected error: %d\n", res);
            break;
        }
    }
}
/* Registers a new muxer output for packets produced by |codec_context|.
   Timestamps written to this destination are rebased against |start_pts|.
   Returns the destination id (pass to gsr_encoder_remove_recording_destination)
   or (size_t)-1 on failure (capacity reached or |stream| already registered). */
size_t gsr_encoder_add_recording_destination(gsr_encoder *self, AVCodecContext *codec_context, AVFormatContext *format_context, AVStream *stream, int64_t start_pts) {
    // Fix: hold the mutex for the capacity check and duplicate scan too — the
    // destination array is mutated under this mutex by
    // gsr_encoder_remove_recording_destination, potentially from another thread.
    pthread_mutex_lock(&self->file_write_mutex);

    if(self->num_recording_destinations >= GSR_MAX_RECORDING_DESTINATIONS) {
        pthread_mutex_unlock(&self->file_write_mutex);
        fprintf(stderr, "gsr error: gsr_encoder_add_recording_destination: failed to add destination, reached the max amount of recording destinations (%d)\n", GSR_MAX_RECORDING_DESTINATIONS);
        return (size_t)-1;
    }

    for(size_t i = 0; i < self->num_recording_destinations; ++i) {
        if(self->recording_destinations[i].stream == stream) {
            pthread_mutex_unlock(&self->file_write_mutex);
            fprintf(stderr, "gsr error: gsr_encoder_add_recording_destination: failed to add destination, the stream %p already exists as an output\n", (void*)stream);
            return (size_t)-1;
        }
    }

    gsr_encoder_recording_destination *recording_destination = &self->recording_destinations[self->num_recording_destinations];
    recording_destination->id = self->recording_destination_id_counter;
    recording_destination->codec_context = codec_context;
    recording_destination->format_context = format_context;
    recording_destination->stream = stream;
    recording_destination->start_pts = start_pts;
    recording_destination->has_received_keyframe = false;

    // Fix: read the id before releasing the mutex — once unlocked, a concurrent
    // remove may swap another entry into this slot.
    const size_t id = recording_destination->id;
    ++self->recording_destination_id_counter;
    ++self->num_recording_destinations;
    pthread_mutex_unlock(&self->file_write_mutex);

    return id;
}
/* Unregisters the destination with the given id. Returns true if it was found.
   Uses swap-remove, so destination ordering is not preserved. */
bool gsr_encoder_remove_recording_destination(gsr_encoder *self, size_t id) {
    bool removed = false;

    pthread_mutex_lock(&self->file_write_mutex);
    for(size_t i = 0; i < self->num_recording_destinations; ++i) {
        if(self->recording_destinations[i].id != id)
            continue;

        // Overwrite the slot with the last entry and shrink the array.
        self->recording_destinations[i] = self->recording_destinations[self->num_recording_destinations - 1];
        --self->num_recording_destinations;
        removed = true;
        break;
    }
    pthread_mutex_unlock(&self->file_write_mutex);

    return removed;
}

241
src/encoder/video/nvenc.c Normal file
View File

@ -0,0 +1,241 @@
#include "../../../include/encoder/video/nvenc.h"
#include "../../../include/egl.h"
#include "../../../include/cuda.h"
#include "../../../include/window/window.h"
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_cuda.h>
#include <stdlib.h>
/* Private state for the NVENC (CUDA) video encoder. Rendering goes into two
   OpenGL plane textures which are registered with CUDA and copied into the
   encoder's CUDA frame. */
typedef struct {
    gsr_video_encoder_nvenc_params params;
    unsigned int target_textures[2];                // GL textures for the Y and UV planes (R8/RG8 or R16/RG16)
    vec2i target_texture_size[2];                   // plane sizes; UV is half size
    AVBufferRef *device_ctx;                        // FFmpeg CUDA hardware device context
    gsr_cuda cuda;
    CUgraphicsResource cuda_graphics_resources[2];  // CUDA registrations of the GL textures
    CUarray mapped_arrays[2];                       // CUDA arrays mapped from the registered textures
    CUstream cuda_stream;                           // stream taken from the FFmpeg CUDA device context
} gsr_video_encoder_nvenc;
/* Creates the FFmpeg CUDA hardware device + frame contexts for the codec,
   reusing the CUDA context already created in self->cuda. The frame pool
   format is NV12 (8-bit) or P010 (10-bit) depending on params.color_depth.
   On success video_codec_context->hw_frames_ctx holds a reference to the
   frame context and self->cuda_stream is the device stream. */
static bool gsr_video_encoder_nvenc_setup_context(gsr_video_encoder_nvenc *self, AVCodecContext *video_codec_context) {
    self->device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
    if(!self->device_ctx) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_context failed: failed to create hardware device context\n");
        return false;
    }

    // Inject our existing CUDA context instead of letting FFmpeg create one.
    AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)self->device_ctx->data;
    AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext*)hw_device_context->hwctx;
    cuda_device_context->cuda_ctx = self->cuda.cu_ctx;
    if(av_hwdevice_ctx_init(self->device_ctx) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_context failed: failed to create hardware device context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
    if(!frame_context) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_context failed: failed to create hwframe context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
    hw_frame_context->width = video_codec_context->width;
    hw_frame_context->height = video_codec_context->height;
    hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
    hw_frame_context->format = video_codec_context->pix_fmt;
    hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;

    if (av_hwframe_ctx_init(frame_context) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_context failed: failed to initialize hardware frame context "
            "(note: ffmpeg version needs to be > 4.0)\n");
        // Fix: this reference was previously leaked on the error path.
        av_buffer_unref(&frame_context);
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    self->cuda_stream = cuda_device_context->stream;
    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    av_buffer_unref(&frame_context);
    // Fix: av_buffer_ref can fail (returns NULL) — previously unchecked.
    if(!video_codec_context->hw_frames_ctx) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_context failed: failed to reference hwframe context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }
    return true;
}
/* Registers an OpenGL texture with CUDA and maps it, returning the CUDA array
   backing mip level 0 in |mapped_array|. On failure after registration the
   resource is left set so the caller's stop path releases it. */
static bool cuda_register_opengl_texture(gsr_cuda *cuda, CUgraphicsResource *cuda_graphics_resource, CUarray *mapped_array, unsigned int texture_id) {
    CUresult res;
    res = cuda->cuGraphicsGLRegisterImage(cuda_graphics_resource, texture_id, GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_NONE);
    if (res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        cuda->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsGLRegisterImage failed, error: %s, texture " "id: %u\n", err_str, texture_id);
        return false;
    }

    // Fix: the results of the following calls were previously ignored; a
    // failure here would only surface later as a broken or failed frame copy.
    res = cuda->cuGraphicsResourceSetMapFlags(*cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
    if (res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        cuda->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsResourceSetMapFlags failed, error: %s\n", err_str);
        return false;
    }

    res = cuda->cuGraphicsMapResources(1, cuda_graphics_resource, 0);
    if (res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        cuda->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsMapResources failed, error: %s\n", err_str);
        return false;
    }

    res = cuda->cuGraphicsSubResourceGetMappedArray(mapped_array, *cuda_graphics_resource, 0, 0);
    if (res != CUDA_SUCCESS) {
        const char *err_str = "unknown";
        cuda->cuGetErrorString(res, &err_str);
        fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsSubResourceGetMappedArray failed, error: %s\n", err_str);
        return false;
    }
    return true;
}
/* Allocates the target hardware frame and creates the two OpenGL plane
   textures, registering each one with CUDA. Texture formats are R8/RG8 for
   NV12 (8-bit) or R16/RG16 for P010 (10-bit). On partial failure, created
   resources are released by the encoder's stop path. */
static bool gsr_video_encoder_nvenc_setup_textures(gsr_video_encoder_nvenc *self, AVCodecContext *video_codec_context, AVFrame *frame) {
    const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_textures: av_hwframe_get_buffer failed: %d\n", res);
        return false;
    }

    const unsigned int internal_formats_nv12[2] = { GL_R8, GL_RG8 };
    const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
    const unsigned int formats[2] = { GL_RED, GL_RG };
    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size

    for(int i = 0; i < 2; ++i) {
        self->target_texture_size[i] = (vec2i){ video_codec_context->width / div[i], video_codec_context->height / div[i] };
        self->target_textures[i] = gl_create_texture(self->params.egl, self->target_texture_size[i].x, self->target_texture_size[i].y, self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i], GL_NEAREST);
        if(self->target_textures[i] == 0) {
            fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_setup_textures: failed to create opengl texture\n");
            return false;
        }

        if(!cuda_register_opengl_texture(&self->cuda, &self->cuda_graphics_resources[i], &self->mapped_arrays[i], self->target_textures[i])) {
            return false;
        }
    }

    return true;
}
static void gsr_video_encoder_nvenc_stop(gsr_video_encoder_nvenc *self, AVCodecContext *video_codec_context);

/* Encoder entry point: loads CUDA, clamps/pads the video size and sets up the
   CUDA frame context and GL textures. Cleans up after itself on failure. */
static bool gsr_video_encoder_nvenc_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
    gsr_video_encoder_nvenc *self = encoder->priv;

    // Overclocking is only attempted on X11 (the X display is passed to cuda load).
    const bool is_x11 = gsr_window_get_display_server(self->params.egl->window) == GSR_DISPLAY_SERVER_X11;
    const bool overclock = is_x11 ? self->params.overclock : false;
    Display *display = is_x11 ? gsr_window_get_display(self->params.egl->window) : NULL;
    if(!gsr_cuda_load(&self->cuda, display, overclock)) {
        fprintf(stderr, "gsr error: gsr_video_encoder_nvenc_start: failed to load cuda\n");
        gsr_video_encoder_nvenc_stop(self, video_codec_context);
        return false;
    }

    // Even dimensions are required for NV12/P010 chroma subsampling.
    // NOTE(review): 128 looks like a minimum encode size for NVENC — confirm.
    video_codec_context->width = FFALIGN(video_codec_context->width, 2);
    video_codec_context->height = FFALIGN(video_codec_context->height, 2);

    if(video_codec_context->width < 128)
        video_codec_context->width = 128;

    if(video_codec_context->height < 128)
        video_codec_context->height = 128;

    frame->width = video_codec_context->width;
    frame->height = video_codec_context->height;

    if(!gsr_video_encoder_nvenc_setup_context(self, video_codec_context)) {
        gsr_video_encoder_nvenc_stop(self, video_codec_context);
        return false;
    }

    if(!gsr_video_encoder_nvenc_setup_textures(self, video_codec_context, frame)) {
        gsr_video_encoder_nvenc_stop(self, video_codec_context);
        return false;
    }

    return true;
}
/* Releases GL textures, FFmpeg contexts and CUDA resources. Called both on
   normal shutdown and from failed start paths, so every resource is checked
   before being released. */
void gsr_video_encoder_nvenc_stop(gsr_video_encoder_nvenc *self, AVCodecContext *video_codec_context) {
    // glDeleteTextures ignores zero ids, so this is safe even if setup failed early.
    self->params.egl->glDeleteTextures(2, self->target_textures);
    self->target_textures[0] = 0;
    self->target_textures[1] = 0;

    if(video_codec_context->hw_frames_ctx)
        av_buffer_unref(&video_codec_context->hw_frames_ctx);
    if(self->device_ctx)
        av_buffer_unref(&self->device_ctx);

    // Unmap/unregister only while the CUDA context still exists.
    if(self->cuda.cu_ctx) {
        for(int i = 0; i < 2; ++i) {
            if(self->cuda_graphics_resources[i]) {
                self->cuda.cuGraphicsUnmapResources(1, &self->cuda_graphics_resources[i], 0);
                self->cuda.cuGraphicsUnregisterResource(self->cuda_graphics_resources[i]);
                self->cuda_graphics_resources[i] = 0;
            }
        }
    }

    gsr_cuda_unload(&self->cuda);
}
/* Copies the two CUDA-mapped OpenGL plane textures into the CUDA device
   memory planes of |frame| (data[0] = Y, data[1] = interleaved UV). */
static void gsr_video_encoder_nvenc_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
    (void)color_conversion;
    gsr_video_encoder_nvenc *self = encoder->priv;
    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
    for(int i = 0; i < 2; ++i) {
        // Fix: zero-initialize the struct. CUDA_MEMCPY2D has more fields than
        // are set below (srcHost, srcDevice, dstHost, dstArray, ...) and
        // passing indeterminate stack values to the driver is undefined.
        CUDA_MEMCPY2D memcpy_struct = {0};
        memcpy_struct.srcXInBytes = 0;
        memcpy_struct.srcY = 0;
        memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;

        memcpy_struct.dstXInBytes = 0;
        memcpy_struct.dstY = 0;
        memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;

        memcpy_struct.srcArray = self->mapped_arrays[i];
        memcpy_struct.srcPitch = frame->width / div[i]; // ignored by CUDA for array sources
        memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
        memcpy_struct.dstPitch = frame->linesize[i];
        memcpy_struct.WidthInBytes = frame->width * (self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? 2 : 1); // 2 bytes per component for P010
        memcpy_struct.Height = frame->height / div[i];
        // TODO: Remove this copy if possible
        self->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, self->cuda_stream);
    }

    // Ensure the async copies finished before the frame is handed to NVENC.
    // TODO: needed?
    self->cuda.cuStreamSynchronize(self->cuda_stream);
}
/* Exposes the two plane textures and the matching destination color format
   (NV12 for 8-bit, P010 for 10-bit) to the color-conversion stage. */
static void gsr_video_encoder_nvenc_get_textures(gsr_video_encoder *encoder, unsigned int *textures, vec2i *texture_sizes, int *num_textures, gsr_destination_color *destination_color) {
    gsr_video_encoder_nvenc *self = encoder->priv;
    for(int i = 0; i < 2; ++i) {
        textures[i] = self->target_textures[i];
        texture_sizes[i] = self->target_texture_size[i];
    }
    *num_textures = 2;

    const bool ten_bit = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS;
    *destination_color = ten_bit ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
/* Stops the encoder and frees both the private state and the encoder itself. */
static void gsr_video_encoder_nvenc_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
    gsr_video_encoder_nvenc *self = encoder->priv;
    gsr_video_encoder_nvenc_stop(self, video_codec_context);
    free(self);
    free(encoder);
}
gsr_video_encoder* gsr_video_encoder_nvenc_create(const gsr_video_encoder_nvenc_params *params) {
gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
if(!encoder)
return NULL;
gsr_video_encoder_nvenc *encoder_cuda = calloc(1, sizeof(gsr_video_encoder_nvenc));
if(!encoder_cuda) {
free(encoder);
return NULL;
}
encoder_cuda->params = *params;
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_nvenc_start,
.copy_textures_to_frame = gsr_video_encoder_nvenc_copy_textures_to_frame,
.get_textures = gsr_video_encoder_nvenc_get_textures,
.destroy = gsr_video_encoder_nvenc_destroy,
.priv = encoder_cuda
};
return encoder;
}

View File

@ -0,0 +1,129 @@
#include "../../../include/encoder/video/software.h"
#include "../../../include/egl.h"
#include "../../../include/utils.h"
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <stdlib.h>
// Linesize alignment requested from av_frame_get_buffer; the frame width is
// also aligned to this in _start so texture rows match the frame planes.
#define LINESIZE_ALIGNMENT 4

/* Private state for the CPU (software) encoder: frames are rendered into two
   GL plane textures and then read back into a CPU AVFrame. */
typedef struct {
    gsr_video_encoder_software_params params;
    unsigned int target_textures[2]; // GL textures for the Y and UV planes
    vec2i texture_sizes[2];          // plane sizes; UV is half size
} gsr_video_encoder_software;
/* Allocates the CPU frame buffers and creates the two GL plane textures the
   color conversion renders into before readback. Texture formats are R8/RG8
   for 8-bit (NV12) or R16/RG16 for 10-bit (P010). */
static bool gsr_video_encoder_software_setup_textures(gsr_video_encoder_software *self, AVCodecContext *video_codec_context, AVFrame *frame) {
    int res = av_frame_get_buffer(frame, LINESIZE_ALIGNMENT);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_software_setup_textures: av_frame_get_buffer failed: %d\n", res);
        return false;
    }

    res = av_frame_make_writable(frame);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_software_setup_textures: av_frame_make_writable failed: %d\n", res);
        return false;
    }

    const unsigned int internal_formats_nv12[2] = { GL_R8, GL_RG8 };
    const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
    const unsigned int formats[2] = { GL_RED, GL_RG };
    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
    for(int i = 0; i < 2; ++i) {
        self->texture_sizes[i] = (vec2i){ video_codec_context->width / div[i], video_codec_context->height / div[i] };
        self->target_textures[i] = gl_create_texture(self->params.egl, self->texture_sizes[i].x, self->texture_sizes[i].y, self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i], GL_NEAREST);
        if(self->target_textures[i] == 0) {
            // Fix: the error message previously named gsr_capture_kms_setup_cuda_textures (copy-paste).
            fprintf(stderr, "gsr error: gsr_video_encoder_software_setup_textures: failed to create opengl texture\n");
            return false;
        }
    }

    return true;
}
static void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecContext *video_codec_context);

/* Encoder entry point: aligns the output size and allocates the CPU frame and
   readback textures. Cleans up after itself on failure. */
static bool gsr_video_encoder_software_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
    gsr_video_encoder_software *self = encoder->priv;

    // Align width to the requested linesize alignment and height to 2 (chroma subsampling).
    const int aligned_width = FFALIGN(video_codec_context->width, LINESIZE_ALIGNMENT);
    const int aligned_height = FFALIGN(video_codec_context->height, 2);
    video_codec_context->width = aligned_width;
    video_codec_context->height = aligned_height;
    frame->width = aligned_width;
    frame->height = aligned_height;

    if(!gsr_video_encoder_software_setup_textures(self, video_codec_context, frame)) {
        gsr_video_encoder_software_stop(self, video_codec_context);
        return false;
    }

    return true;
}
/* Deletes the readback textures; the CPU frame itself is owned by the caller. */
void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecContext *video_codec_context) {
    (void)video_codec_context;
    self->params.egl->glDeleteTextures(2, self->target_textures);
    for(int i = 0; i < 2; ++i)
        self->target_textures[i] = 0;
}
/* Reads the converted plane textures back from the GPU into the CPU frame
   planes (data[0] = Y, data[1] = interleaved UV). */
static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
    (void)encoder;
    // TODO: hdr support
    const unsigned int plane_formats[2] = { GL_RED, GL_RG };
    const int plane_div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
    for(int plane = 0; plane < 2; ++plane) {
        // TODO: Use glPixelStore?
        gsr_color_conversion_read_destination_texture(color_conversion, plane, 0, 0, frame->width / plane_div[plane], frame->height / plane_div[plane], plane_formats[plane], GL_UNSIGNED_BYTE, frame->data[plane]);
    }
}
/* Exposes the two plane textures and the destination color format (NV12 for
   8-bit, P010 for 10-bit) to the color-conversion stage. */
static void gsr_video_encoder_software_get_textures(gsr_video_encoder *encoder, unsigned int *textures, vec2i *texture_sizes, int *num_textures, gsr_destination_color *destination_color) {
    gsr_video_encoder_software *self = encoder->priv;
    for(int i = 0; i < 2; ++i) {
        textures[i] = self->target_textures[i];
        texture_sizes[i] = self->texture_sizes[i];
    }
    *num_textures = 2;

    const bool ten_bit = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS;
    *destination_color = ten_bit ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
/* Stops the encoder and frees both the private state and the encoder itself. */
static void gsr_video_encoder_software_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
    gsr_video_encoder_software *self = encoder->priv;
    gsr_video_encoder_software_stop(self, video_codec_context);
    free(self);
    free(encoder);
}
gsr_video_encoder* gsr_video_encoder_software_create(const gsr_video_encoder_software_params *params) {
gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
if(!encoder)
return NULL;
gsr_video_encoder_software *encoder_software = calloc(1, sizeof(gsr_video_encoder_software));
if(!encoder_software) {
free(encoder);
return NULL;
}
encoder_software->params = *params;
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_software_start,
.copy_textures_to_frame = gsr_video_encoder_software_copy_textures_to_frame,
.get_textures = gsr_video_encoder_software_get_textures,
.destroy = gsr_video_encoder_software_destroy,
.priv = encoder_software
};
return encoder;
}

347
src/encoder/video/vaapi.c Normal file
View File

@ -0,0 +1,347 @@
#include "../../../include/encoder/video/vaapi.h"
#include "../../../include/utils.h"
#include "../../../include/egl.h"
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_vaapi.h>
#include <libavutil/intreadwrite.h>
#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_drmcommon.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
/* Private state for the VAAPI encoder: the VAAPI target surface is exported
   as DRM PRIME DMA-BUFs and imported as OpenGL textures, so rendering writes
   directly into the encoder's frame. */
typedef struct {
    gsr_video_encoder_vaapi_params params;
    unsigned int target_textures[2]; // GL textures bound to the surface's Y and UV planes
    vec2i texture_sizes[2];          // plane sizes; UV is half size
    AVBufferRef *device_ctx;         // FFmpeg VAAPI hardware device context
    VADisplay va_dpy;                // display taken from device_ctx (not owned)
    VADRMPRIMESurfaceDescriptor prime; // exported descriptor of the target surface
} gsr_video_encoder_vaapi;
/* Creates the FFmpeg VAAPI hardware device + frame contexts for the codec,
   using the render node derived from the card path. The frame pool format is
   NV12 (8-bit) or P010 (10-bit) depending on params.color_depth. On success
   self->va_dpy holds the VADisplay owned by the device context. */
static bool gsr_video_encoder_vaapi_setup_context(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context) {
    char render_path[128];
    if(!gsr_card_path_get_render_path(self->params.egl->card_path, render_path)) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to get /dev/dri/renderDXXX file from %s\n", self->params.egl->card_path);
        return false;
    }

    if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hardware device context\n");
        return false;
    }

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
    if(!frame_context) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hwframe context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
    hw_frame_context->width = video_codec_context->width;
    hw_frame_context->height = video_codec_context->height;
    hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
    hw_frame_context->format = video_codec_context->pix_fmt;
    hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
    //hw_frame_context->initial_pool_size = 20;

    AVVAAPIDeviceContext *vactx = ((AVHWDeviceContext*)self->device_ctx->data)->hwctx;
    self->va_dpy = vactx->display;

    if (av_hwframe_ctx_init(frame_context) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to initialize hardware frame context "
            "(note: ffmpeg version needs to be > 4.0)\n");
        // Fix: this reference was previously leaked on the error path.
        av_buffer_unref(&frame_context);
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    av_buffer_unref(&frame_context);
    // Fix: av_buffer_ref can fail (returns NULL) — previously unchecked.
    if(!video_codec_context->hw_frames_ctx) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to reference hwframe context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }
    return true;
}
/* Packs four character codes into a little-endian FOURCC value
   (a is the lowest byte, d the highest). */
static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
    uint32_t code = a;
    code |= b << 8;
    code |= c << 16;
    code |= d << 24;
    return code;
}
/* Allocates the target VAAPI frame, exports its surface as DRM PRIME DMA-BUFs
   and imports each plane (Y and UV) as an OpenGL texture via EGLImage, so GL
   rendering writes straight into the encoder surface. Only NV12 and P010
   surface layouts are supported. */
static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context, AVFrame *frame) {
    const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_textures: av_hwframe_get_buffer failed: %d\n", res);
        return false;
    }

    // For VAAPI frames, data[3] carries the VASurfaceID.
    VASurfaceID target_surface_id = (uintptr_t)frame->data[3];

    VAStatus va_status = vaExportSurfaceHandle(self->va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, &self->prime);
    if(va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_textures: vaExportSurfaceHandle failed, error: %d\n", va_status);
        return false;
    }
    vaSyncSurface(self->va_dpy, target_surface_id);

    // DRM formats for each plane: single-channel (Y) and two-channel (UV).
    const uint32_t formats_nv12[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
    const uint32_t formats_p010[2] = { fourcc('R', '1', '6', ' '), fourcc('G', 'R', '3', '2') };

    if(self->prime.fourcc == VA_FOURCC_NV12 || self->prime.fourcc == VA_FOURCC_P010) {
        const uint32_t *formats = self->prime.fourcc == VA_FOURCC_NV12 ? formats_nv12 : formats_p010;
        const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size

        self->params.egl->glGenTextures(2, self->target_textures);
        for(int i = 0; i < 2; ++i) {
            // One layer per plane (VA_EXPORT_SURFACE_SEPARATE_LAYERS above).
            const int layer = i;

            int fds[4];
            uint32_t offsets[4];
            uint32_t pitches[4];
            uint64_t modifiers[4];
            for(uint32_t j = 0; j < self->prime.layers[layer].num_planes; ++j) {
                // TODO: Close these? in _stop, using self->prime
                fds[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].fd;
                offsets[j] = self->prime.layers[layer].offset[j];
                pitches[j] = self->prime.layers[layer].pitch[j];
                modifiers[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].drm_format_modifier;
            }

            intptr_t img_attr[44];
            setup_dma_buf_attrs(img_attr, formats[i], self->prime.width / div[i], self->prime.height / div[i],
                fds, offsets, pitches, modifiers, self->prime.layers[layer].num_planes, true);

            self->texture_sizes[i] = (vec2i){ self->prime.width / div[i], self->prime.height / div[i] };

            // Drain any stale EGL errors so the check below is meaningful.
            while(self->params.egl->eglGetError() != EGL_SUCCESS){}
            EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
            if(!image) {
                fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_textures: failed to create egl image from drm fd for output drm fd, error: %d\n", self->params.egl->eglGetError());
                return false;
            }

            self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
            self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
            self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

            // Drain stale GL/EGL errors before binding the image to the texture.
            while(self->params.egl->glGetError()) {}
            while(self->params.egl->eglGetError() != EGL_SUCCESS){}
            self->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
            if(self->params.egl->glGetError() != 0 || self->params.egl->eglGetError() != EGL_SUCCESS) {
                // TODO: Get the error properly
                fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_textures: failed to bind egl image to gl texture, error: %d\n", self->params.egl->eglGetError());
                self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
                self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
                return false;
            }

            // The texture keeps the image contents; the EGLImage handle itself
            // can be destroyed after binding.
            self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
            self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
        }

        return true;
    } else {
        fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_textures: unexpected fourcc %u for output drm fd, expected nv12 or p010\n", self->prime.fourcc);
        return false;
    }
}
static void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context);
/* Checks whether the vaapi driver behind |card_path| (a /dev/dri/cardX path) exposes
 * the VASurfaceAttribAlignmentSize surface attribute for hevc encoding. When it does,
 * gsr can request 2-aligned surfaces instead of padding the video to the AMD default
 * 256 alignment (which adds black bars).
 * Returns false on any error or when the attribute is not supported. */
static bool supports_hevc_without_padding(const char *card_path) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 28, 100) && VA_CHECK_VERSION(1, 21, 0)
    VAStatus va_status;
    VAConfigID va_config = 0;
    unsigned int num_surface_attr = 0;
    VASurfaceAttrib *surface_attr_list = NULL;
    bool supports_surface_attrib_alignment_size = false;
    int va_major = 0;
    int va_minor = 0;
    bool initialized = false;

    char render_path[128];
    if(!gsr_card_path_get_render_path(card_path, render_path)) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
        return false;
    }

    const int drm_fd = open(render_path, O_RDWR);
    if(drm_fd == -1) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to open device %s\n", render_path);
        return false;
    }

    const VADisplay va_dpy = vaGetDisplayDRM(drm_fd);
    if(!va_dpy) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to get vaapi display for device %s\n", render_path);
        goto done;
    }

    /* Silence libva info logging; only the query result matters here. */
    vaSetInfoCallback(va_dpy, NULL, NULL);

    if(vaInitialize(va_dpy, &va_major, &va_minor) != VA_STATUS_SUCCESS) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: vaInitialize failed\n");
        goto done;
    }
    initialized = true;

    va_status = vaCreateConfig(va_dpy, VAProfileHEVCMain, VAEntrypointEncSlice, NULL, 0, &va_config);
    if(va_status != VA_STATUS_SUCCESS) {
        /* Fall back to the low-power encoding entrypoint. */
        va_status = vaCreateConfig(va_dpy, VAProfileHEVCMain, VAEntrypointEncSliceLP, NULL, 0, &va_config);
        if(va_status != VA_STATUS_SUCCESS) {
            fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to create hevc vaapi config, error: %s (%d)\n", vaErrorStr(va_status), va_status);
            goto done; /* bug fix: was "return false", which leaked drm_fd and the initialized va display */
        }
    }

    va_status = vaQuerySurfaceAttributes(va_dpy, va_config, 0, &num_surface_attr);
    if(va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to query vaapi surface attributes size, error: %s (%d)\n", vaErrorStr(va_status), va_status);
        goto done;
    }

    /* No attributes reported means the alignment attribute cannot be present. */
    if(num_surface_attr == 0)
        goto done;

    surface_attr_list = malloc(num_surface_attr * sizeof(VASurfaceAttrib));
    if(!surface_attr_list) {
        /* bug fix: the old message printed vaErrorStr of a stale (successful) va_status */
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to allocate memory for %u vaapi surface attributes\n", num_surface_attr);
        goto done;
    }

    va_status = vaQuerySurfaceAttributes(va_dpy, va_config, surface_attr_list, &num_surface_attr);
    if(va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "gsr error: supports_hevc_without_padding: failed to query vaapi surface attributes data, error: %s (%d)\n", vaErrorStr(va_status), va_status);
        goto done;
    }

    for(unsigned int i = 0; i < num_surface_attr; ++i) {
        if(surface_attr_list[i].type == VASurfaceAttribAlignmentSize) {
            supports_surface_attrib_alignment_size = true;
            break;
        }
    }

    done:
    free(surface_attr_list);
    if(va_config > 0)
        vaDestroyConfig(va_dpy, va_config);
    if(initialized)
        vaTerminate(va_dpy);
    if(drm_fd > 0)
        close(drm_fd);
    return supports_surface_attrib_alignment_size;
#else
    return false;
#endif
}
/* Aligns the output video size to what the driver/hardware requires, then creates
 * the vaapi frame context and the opengl textures that wrap the hardware frame.
 * On failure all partially created resources are released. */
static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
    gsr_video_encoder_vaapi *self = encoder->priv;

    const bool is_amd = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD;
    if(is_amd && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
        /* Newer AMD drivers expose an alignment surface attribute; then 256 padding isn't needed. */
        const int alignment = supports_hevc_without_padding(self->params.egl->card_path) ? 2 : 256;
        video_codec_context->width = FFALIGN(video_codec_context->width, alignment);
        video_codec_context->height = FFALIGN(video_codec_context->height, alignment);
    } else if(is_amd && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
        // TODO: Dont do this for VCN 5 and forward which should fix this hardware bug
        video_codec_context->width = FFALIGN(video_codec_context->width, 64);
        // AMD driver has special case handling for 1080 height to set it to 1082 instead of 1088 (1080 aligned to 16).
        // TODO: Set height to 1082 in this case, but it wont work because it will be aligned to 1088.
        if(video_codec_context->height != 1080)
            video_codec_context->height = FFALIGN(video_codec_context->height, 16);
    } else {
        video_codec_context->width = FFALIGN(video_codec_context->width, 2);
        video_codec_context->height = FFALIGN(video_codec_context->height, 2);
    }

    if(FFALIGN(video_codec_context->width, 2) != FFALIGN(frame->width, 2) || FFALIGN(video_codec_context->height, 2) != FFALIGN(frame->height, 2))
        fprintf(stderr, "gsr warning: gsr_video_encoder_vaapi_start: black bars have been added to the video because of a bug in AMD drivers/hardware. Record with h264 codec instead (-k h264) to get around this issue\n");

    /* Some encoders fail on tiny surfaces; enforce a minimum size. */
    if(video_codec_context->width < 128)
        video_codec_context->width = 128;
    if(video_codec_context->height < 128)
        video_codec_context->height = 128;

    frame->width = video_codec_context->width;
    frame->height = video_codec_context->height;

    if(!gsr_video_encoder_vaapi_setup_context(self, video_codec_context)) {
        gsr_video_encoder_vaapi_stop(self, video_codec_context);
        return false;
    }

    if(!gsr_video_encoder_vaapi_setup_textures(self, video_codec_context, frame)) {
        gsr_video_encoder_vaapi_stop(self, video_codec_context);
        return false;
    }

    return true;
}
/* Releases the gl textures, ffmpeg contexts and exported dma-buf fds created by start. */
void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context) {
    self->params.egl->glDeleteTextures(2, self->target_textures);
    self->target_textures[0] = 0;
    self->target_textures[1] = 0;

    if(video_codec_context->hw_frames_ctx)
        av_buffer_unref(&video_codec_context->hw_frames_ctx);
    if(self->device_ctx)
        av_buffer_unref(&self->device_ctx);

    /* Close the dma-buf fds exported from the vaapi surface. */
    for(uint32_t object_index = 0; object_index < self->prime.num_objects; ++object_index) {
        const int object_fd = self->prime.objects[object_index].fd;
        if(object_fd > 0) {
            close(object_fd);
            self->prime.objects[object_index].fd = 0;
        }
    }
}
/* Exposes the two plane textures wrapping the vaapi surface, their sizes and the
 * destination color format (P010 for 10-bit capture, NV12 otherwise). */
static void gsr_video_encoder_vaapi_get_textures(gsr_video_encoder *encoder, unsigned int *textures, vec2i *texture_sizes, int *num_textures, gsr_destination_color *destination_color) {
    const gsr_video_encoder_vaapi *self = encoder->priv;
    for(int i = 0; i < 2; ++i) {
        textures[i] = self->target_textures[i];
        texture_sizes[i] = self->texture_sizes[i];
    }
    *num_textures = 2;
    if(self->params.color_depth == GSR_COLOR_DEPTH_10_BITS)
        *destination_color = GSR_DESTINATION_COLOR_P010;
    else
        *destination_color = GSR_DESTINATION_COLOR_NV12;
}
/* Stops the encoder and frees both the private state and the encoder object itself. */
static void gsr_video_encoder_vaapi_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
    gsr_video_encoder_vaapi *self = encoder->priv;
    gsr_video_encoder_vaapi_stop(self, video_codec_context);
    free(self);
    free(encoder);
}
gsr_video_encoder* gsr_video_encoder_vaapi_create(const gsr_video_encoder_vaapi_params *params) {
gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
if(!encoder)
return NULL;
gsr_video_encoder_vaapi *encoder_vaapi = calloc(1, sizeof(gsr_video_encoder_vaapi));
if(!encoder_vaapi) {
free(encoder);
return NULL;
}
encoder_vaapi->params = *params;
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_vaapi_start,
.get_textures = gsr_video_encoder_vaapi_get_textures,
.destroy = gsr_video_encoder_vaapi_destroy,
.priv = encoder_vaapi
};
return encoder;
}

28
src/encoder/video/video.c Normal file
View File

@ -0,0 +1,28 @@
#include "../../../include/encoder/video/video.h"
#include <assert.h>
/* Starts the encoder via its vtable. Must not already be started.
 * Marks the encoder started only when the backend succeeds. */
bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
    assert(!encoder->started);
    const bool started_ok = encoder->start(encoder, video_codec_context, frame);
    if(started_ok)
        encoder->started = true;
    return started_ok;
}
/* Stops and frees a started encoder through its vtable. Must only be called on a
 * started encoder (asserted); clears the started flag before destruction. */
void gsr_video_encoder_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
    assert(encoder->started);
    encoder->started = false;
    encoder->destroy(encoder, video_codec_context);
}
/* Optional vtable hook: lets the backend copy its textures into |frame| before encoding.
 * A NULL hook (backends that expose textures directly) is silently skipped. */
void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
    assert(encoder->started);
    if(encoder->copy_textures_to_frame)
        encoder->copy_textures_to_frame(encoder, frame, color_conversion);
}
/* Fetches the backend's render target textures, their sizes and the destination
 * color format. Must only be called on a started encoder (asserted). */
void gsr_video_encoder_get_textures(gsr_video_encoder *encoder, unsigned int *textures, vec2i *texture_sizes, int *num_textures, gsr_destination_color *destination_color) {
    assert(encoder->started);
    encoder->get_textures(encoder, textures, texture_sizes, num_textures, destination_color);
}

315
src/encoder/video/vulkan.c Normal file
View File

@ -0,0 +1,315 @@
#include "../../../include/encoder/video/vulkan.h"
#include "../../../include/utils.h"
#include "../../../include/egl.h"
#include <libavcodec/avcodec.h>
#define VK_NO_PROTOTYPES
//#include <libavutil/hwcontext_vulkan.h>
//#include <vulkan/vulkan_core.h>
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
#define GL_TEXTURE_TILING_EXT 0x9580
#define GL_OPTIMAL_TILING_EXT 0x9584
#define GL_LINEAR_TILING_EXT 0x9585
/* Private state for the vulkan video encoder (work in progress — most setup code
 * in this file is currently compiled out with #if 0). */
typedef struct {
    gsr_video_encoder_vulkan_params params;
    unsigned int target_textures[2]; /* gl textures for the two frame planes; currently never created */
    vec2i texture_sizes[2];
    AVBufferRef *device_ctx; /* ffmpeg vulkan hw device context (unused while setup is disabled) */
} gsr_video_encoder_vulkan;
/* Creates the ffmpeg vulkan hw device/frames context for |video_codec_context|.
 * NOTE(review): the real setup is compiled out with #if 0 (vulkan encoding is
 * work in progress), so this currently always succeeds without doing anything. */
static bool gsr_video_encoder_vulkan_setup_context(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
    AVDictionary *options = NULL;
    //av_dict_set(&options, "linear_images", "1", 0);
    //av_dict_set(&options, "disable_multiplane", "1", 0);
#if 0
    // TODO: Use correct device
    if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, options, 0) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hardware device context\n");
        return false;
    }

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
    if(!frame_context) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hwframe context\n");
        av_buffer_unref(&self->device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
    hw_frame_context->width = video_codec_context->width;
    hw_frame_context->height = video_codec_context->height;
    hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
    hw_frame_context->format = video_codec_context->pix_fmt;
    hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;

    //AVVulkanFramesContext *vk_frame_ctx = (AVVulkanFramesContext*)hw_frame_context->hwctx;
    //hw_frame_context->initial_pool_size = 20;

    if (av_hwframe_ctx_init(frame_context) < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to initialize hardware frame context "
                        "(note: ffmpeg version needs to be > 4.0)\n");
        av_buffer_unref(&self->device_ctx);
        //av_buffer_unref(&frame_context);
        return false;
    }

    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    av_buffer_unref(&frame_context);
#endif
    return true;
}
/* NOTE(review): disabled helpers kept as reference for the unfinished vulkan path. */
#if 0
/* Extracts the AVVulkanDeviceContext backing |video_codec_context|'s hw frames,
 * or NULL when the codec context has no vulkan hw frames context. */
static AVVulkanDeviceContext* video_codec_context_get_vulkan_data(AVCodecContext *video_codec_context) {
    AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
    if(!hw_frames_ctx)
        return NULL;

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
    AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
    if(device_context->type != AV_HWDEVICE_TYPE_VULKAN)
        return NULL;

    return (AVVulkanDeviceContext*)device_context->hwctx;
}

/* Finds a vulkan memory type index matching |mem_reqs| and |prop_flags|; UINT32_MAX if none. */
static uint32_t get_memory_type_idx(VkPhysicalDevice pdev, const VkMemoryRequirements *mem_reqs, VkMemoryPropertyFlagBits prop_flags, PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties) {
    VkPhysicalDeviceMemoryProperties pdev_mem_props;
    uint32_t i;

    vkGetPhysicalDeviceMemoryProperties(pdev, &pdev_mem_props);

    for (i = 0; i < pdev_mem_props.memoryTypeCount; i++) {
        const VkMemoryType *type = &pdev_mem_props.memoryTypes[i];
        if ((mem_reqs->memoryTypeBits & (1 << i)) &&
            (type->propertyFlags & prop_flags) == prop_flags) {
            return i;
            break;
        }
    }
    return UINT32_MAX;
}
#endif
/* Allocates a hardware frame for |frame| from the codec's hw frames context.
 * NOTE(review): the gl texture import (vulkan memory -> opengl memory object ->
 * plane textures) is compiled out with #if 0; it is experimental WIP kept as
 * reference. With it disabled this only allocates the frame buffer. */
static bool gsr_video_encoder_vulkan_setup_textures(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context, AVFrame *frame) {
    const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: av_hwframe_get_buffer failed: %d\n", res);
        return false;
    }

    /* Drain any stale gl errors so later checks only see errors from this setup. */
    while(self->params.egl->glGetError()) {}
#if 0
    AVVkFrame *target_surface_id = (AVVkFrame*)frame->data[0];
    AVVulkanDeviceContext* vv = video_codec_context_get_vulkan_data(video_codec_context);
    const size_t luma_size = frame->width * frame->height;
    if(vv) {
        PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vv->get_proc_addr(vv->inst, "vkGetImageMemoryRequirements");
        PFN_vkAllocateMemory vkAllocateMemory = (PFN_vkAllocateMemory)vv->get_proc_addr(vv->inst, "vkAllocateMemory");
        PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vv->get_proc_addr(vv->inst, "vkGetPhysicalDeviceMemoryProperties");
        PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vv->get_proc_addr(vv->inst, "vkGetMemoryFdKHR");

        VkMemoryRequirements mem_reqs = {0};
        vkGetImageMemoryRequirements(vv->act_dev, target_surface_id->img[0], &mem_reqs);

        fprintf(stderr, "size: %lu, alignment: %lu, memory bits: 0x%08x\n", mem_reqs.size, mem_reqs.alignment, mem_reqs.memoryTypeBits);
        VkDeviceMemory mem;
        {
            VkExportMemoryAllocateInfo exp_mem_info;
            VkMemoryAllocateInfo mem_alloc_info;
            VkMemoryDedicatedAllocateInfoKHR ded_info;

            memset(&exp_mem_info, 0, sizeof(exp_mem_info));
            exp_mem_info.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
            exp_mem_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;

            memset(&ded_info, 0, sizeof(ded_info));
            ded_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
            ded_info.image = target_surface_id->img[0];

            exp_mem_info.pNext = &ded_info;

            memset(&mem_alloc_info, 0, sizeof(mem_alloc_info));
            mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
            mem_alloc_info.pNext = &exp_mem_info;
            mem_alloc_info.allocationSize = target_surface_id->size[0];
            mem_alloc_info.memoryTypeIndex = get_memory_type_idx(vv->phys_dev, &mem_reqs, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vkGetPhysicalDeviceMemoryProperties);

            if (mem_alloc_info.memoryTypeIndex == UINT32_MAX) {
                fprintf(stderr, "No suitable memory type index found.\n");
                return VK_NULL_HANDLE;
            }

            if (vkAllocateMemory(vv->act_dev, &mem_alloc_info, 0, &mem) !=
                VK_SUCCESS)
                return VK_NULL_HANDLE;

            fprintf(stderr, "memory: %p\n", (void*)mem);
        }

        fprintf(stderr, "target surface id: %p, %zu, %zu\n", (void*)target_surface_id->mem[0], target_surface_id->offset[0], target_surface_id->offset[1]);
        fprintf(stderr, "vkGetMemoryFdKHR: %p\n", (void*)vkGetMemoryFdKHR);

        int fd = 0;
        VkMemoryGetFdInfoKHR fd_info;
        memset(&fd_info, 0, sizeof(fd_info));
        fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
        fd_info.memory = target_surface_id->mem[0];
        fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
        if(vkGetMemoryFdKHR(vv->act_dev, &fd_info, &fd) != VK_SUCCESS) {
            fprintf(stderr, "failed!\n");
        } else {
            fprintf(stderr, "fd: %d\n", fd);
        }

        fprintf(stderr, "glImportMemoryFdEXT: %p, size: %zu\n", (void*)self->params.egl->glImportMemoryFdEXT, target_surface_id->size[0]);
        const int tiling = target_surface_id->tiling == VK_IMAGE_TILING_LINEAR ? GL_LINEAR_TILING_EXT : GL_OPTIMAL_TILING_EXT;

        if(tiling != GL_OPTIMAL_TILING_EXT) {
            fprintf(stderr, "tiling %d is not supported, only GL_OPTIMAL_TILING_EXT (%d) is supported\n", tiling, GL_OPTIMAL_TILING_EXT);
        }

        unsigned int gl_memory_obj = 0;
        self->params.egl->glCreateMemoryObjectsEXT(1, &gl_memory_obj);

        //const int dedicated = GL_TRUE;
        //self->params.egl->glMemoryObjectParameterivEXT(gl_memory_obj, GL_DEDICATED_MEMORY_OBJECT_EXT, &dedicated);

        self->params.egl->glImportMemoryFdEXT(gl_memory_obj, target_surface_id->size[0], GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);
        if(!self->params.egl->glIsMemoryObjectEXT(gl_memory_obj))
            fprintf(stderr, "failed to create object!\n");

        fprintf(stderr, "gl memory obj: %u, error: %d\n", gl_memory_obj, self->params.egl->glGetError());

        // fprintf(stderr, "0 gl error: %d\n", self->params.egl->glGetError());
        // unsigned int vertex_buffer = 0;
        // self->params.egl->glGenBuffers(1, &vertex_buffer);
        // self->params.egl->glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
        // self->params.egl->glBufferStorageMemEXT(GL_ARRAY_BUFFER, target_surface_id->size[0], gl_memory_obj, target_surface_id->offset[0]);
        // fprintf(stderr, "1 gl error: %d\n", self->params.egl->glGetError());

        // fprintf(stderr, "0 gl error: %d\n", self->params.egl->glGetError());
        // unsigned int buffer = 0;
        // self->params.egl->glCreateBuffers(1, &buffer);
        // self->params.egl->glNamedBufferStorageMemEXT(buffer, target_surface_id->size[0], gl_memory_obj, target_surface_id->offset[0]);
        // fprintf(stderr, "1 gl error: %d\n", self->params.egl->glGetError());

        self->params.egl->glGenTextures(1, &self->target_textures[0]);
        self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[0]);

        fprintf(stderr, "1 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_TILING_EXT, tiling);

        fprintf(stderr, "tiling: %d\n", tiling);

        fprintf(stderr, "2 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glTexStorageMem2DEXT(GL_TEXTURE_2D, 1, GL_R8, frame->width, frame->height, gl_memory_obj, target_surface_id->offset[0]);

        fprintf(stderr, "3 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);

        self->params.egl->glGenTextures(1, &self->target_textures[1]);
        self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[1]);

        fprintf(stderr, "1 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_TILING_EXT, tiling);

        fprintf(stderr, "tiling: %d\n", tiling);

        fprintf(stderr, "2 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glTexStorageMem2DEXT(GL_TEXTURE_2D, 1, GL_RG8, frame->width/2, frame->height/2, gl_memory_obj, target_surface_id->offset[0] + luma_size);

        fprintf(stderr, "3 gl error: %d\n", self->params.egl->glGetError());
        self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);

        self->texture_sizes[0] = (vec2i){ frame->width, frame->height };
        self->texture_sizes[1] = (vec2i){ frame->width/2, frame->height/2 };
    }
#endif
    return true;
}
static void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context);
/* Rounds the output size to even dimensions with a sane minimum, then creates the
 * hw frame context and textures. On failure partial state is torn down. */
static bool gsr_video_encoder_vulkan_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
    gsr_video_encoder_vulkan *self = encoder->priv;

    int aligned_width = FFALIGN(video_codec_context->width, 2);
    int aligned_height = FFALIGN(video_codec_context->height, 2);
    if(aligned_width < 128)
        aligned_width = 128;
    if(aligned_height < 128)
        aligned_height = 128;

    video_codec_context->width = aligned_width;
    video_codec_context->height = aligned_height;
    frame->width = aligned_width;
    frame->height = aligned_height;

    if(!gsr_video_encoder_vulkan_setup_context(self, video_codec_context)) {
        gsr_video_encoder_vulkan_stop(self, video_codec_context);
        return false;
    }

    if(!gsr_video_encoder_vulkan_setup_textures(self, video_codec_context, frame)) {
        gsr_video_encoder_vulkan_stop(self, video_codec_context);
        return false;
    }

    return true;
}
/* Releases the gl textures and ffmpeg contexts created by gsr_video_encoder_vulkan_start.
 * All fields are reset so calling this again is harmless. */
void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
    self->params.egl->glDeleteTextures(2, self->target_textures);
    self->target_textures[0] = 0;
    self->target_textures[1] = 0;
    if(video_codec_context->hw_frames_ctx)
        av_buffer_unref(&video_codec_context->hw_frames_ctx);
    if(self->device_ctx)
        av_buffer_unref(&self->device_ctx);
}
/* Exposes the two plane textures, their sizes and the destination color format
 * (P010 for 10-bit capture, NV12 otherwise). */
static void gsr_video_encoder_vulkan_get_textures(gsr_video_encoder *encoder, unsigned int *textures, vec2i *texture_sizes, int *num_textures, gsr_destination_color *destination_color) {
    const gsr_video_encoder_vulkan *self = encoder->priv;
    for(int i = 0; i < 2; ++i) {
        textures[i] = self->target_textures[i];
        texture_sizes[i] = self->texture_sizes[i];
    }
    *num_textures = 2;
    if(self->params.color_depth == GSR_COLOR_DEPTH_10_BITS)
        *destination_color = GSR_DESTINATION_COLOR_P010;
    else
        *destination_color = GSR_DESTINATION_COLOR_NV12;
}
/* Stops the encoder and frees both the private state and the encoder object itself. */
static void gsr_video_encoder_vulkan_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
    gsr_video_encoder_vulkan *self = encoder->priv;
    gsr_video_encoder_vulkan_stop(self, video_codec_context);
    free(self);
    free(encoder);
}
gsr_video_encoder* gsr_video_encoder_vulkan_create(const gsr_video_encoder_vulkan_params *params) {
gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
if(!encoder)
return NULL;
gsr_video_encoder_vulkan *encoder_vulkan = calloc(1, sizeof(gsr_video_encoder_vulkan));
if(!encoder_vulkan) {
free(encoder);
return NULL;
}
encoder_vulkan->params = *params;
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_vulkan_start,
.copy_textures_to_frame = NULL,
.get_textures = gsr_video_encoder_vulkan_get_textures,
.destroy = gsr_video_encoder_vulkan_destroy,
.priv = encoder_vulkan
};
return encoder;
}

83
src/image_writer.c Normal file
View File

@ -0,0 +1,83 @@
#include "../include/image_writer.h"
#include "../include/egl.h"
#include "../include/utils.h"

#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../external/stb_image_write.h"

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
/* TODO: Support hdr/10-bit */
/* Initializes |self| for reading back an opengl texture and writing it to an image file.
 * Creates a |width| x |height| RGBA8 texture owned by this writer (freed by
 * gsr_image_writer_deinit). Returns false if the texture can't be created. */
bool gsr_image_writer_init_opengl(gsr_image_writer *self, gsr_egl *egl, int width, int height) {
    memset(self, 0, sizeof(*self));
    self->egl = egl;
    self->width = width;
    self->height = height;
    self->texture = gl_create_texture(self->egl, self->width, self->height, GL_RGBA8, GL_RGBA, GL_NEAREST); /* TODO: use GL_RGB16 instead of GL_RGB8 for hdr/10-bit */
    if(self->texture == 0) {
        /* bug fix: the error message previously named the wrong function (gsr_image_writer_init) */
        fprintf(stderr, "gsr error: gsr_image_writer_init_opengl: failed to create texture\n");
        return false;
    }
    return true;
}
/* Frees the opengl texture created by gsr_image_writer_init_opengl.
 * Calling it again after the texture is released is a no-op. */
void gsr_image_writer_deinit(gsr_image_writer *self) {
    if(self->texture == 0)
        return;
    self->egl->glDeleteTextures(1, &self->texture);
    self->texture = 0;
}
/* Encodes |data| (tightly packed RGBA, self->width x self->height) to |filepath|.
 * |quality| is clamped to [1,100] and only affects jpeg output. */
static bool gsr_image_writer_write_memory_to_file(gsr_image_writer *self, const char *filepath, gsr_image_format image_format, int quality, const void *data) {
    if(quality < 1)
        quality = 1;
    if(quality > 100)
        quality = 100;

    bool write_ok = false;
    switch(image_format) {
        case GSR_IMAGE_FORMAT_JPEG:
            write_ok = stbi_write_jpg(filepath, self->width, self->height, 4, data, quality);
            break;
        case GSR_IMAGE_FORMAT_PNG:
            /* stride 0 = rows are tightly packed */
            write_ok = stbi_write_png(filepath, self->width, self->height, 4, data, 0);
            break;
    }

    if(!write_ok)
        fprintf(stderr, "gsr error: gsr_image_writer_write_to_file: failed to write image data to output file %s\n", filepath);

    return write_ok;
}
/* Reads the writer's texture back into cpu memory through a temporary fbo and
 * encodes it to |filepath|. Returns false on allocation or encoding failure. */
static bool gsr_image_writer_write_opengl_texture_to_file(gsr_image_writer *self, const char *filepath, gsr_image_format image_format, int quality) {
    /* size_t arithmetic prevents int overflow of width*height*4 for very large textures */
    uint8_t *frame_data = malloc((size_t)self->width * (size_t)self->height * 4);
    if(!frame_data) {
        fprintf(stderr, "gsr error: gsr_image_writer_write_to_file: failed to allocate memory for image frame\n");
        return false;
    }

    /* Attach the texture to a temporary framebuffer so glReadPixels can read it. */
    unsigned int fbo = 0;
    self->egl->glGenFramebuffers(1, &fbo);
    self->egl->glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    self->egl->glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self->texture, 0);

    self->egl->glReadPixels(0, 0, self->width, self->height, GL_RGBA, GL_UNSIGNED_BYTE, frame_data);

    self->egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
    self->egl->glDeleteFramebuffers(1, &fbo);

    self->egl->glFlush();
    self->egl->glFinish();

    const bool success = gsr_image_writer_write_memory_to_file(self, filepath, image_format, quality, frame_data);
    free(frame_data);
    return success;
}
/* Public entry point: writes the current content of the writer's opengl texture to
 * |filepath|. |image_format| selects jpeg or png; |quality| (1-100) only applies to jpeg. */
bool gsr_image_writer_write_to_file(gsr_image_writer *self, const char *filepath, gsr_image_format image_format, int quality) {
    return gsr_image_writer_write_opengl_texture_to_file(self, filepath, image_format, quality);
}

34
src/library_loader.c Normal file
View File

@ -0,0 +1,34 @@
#include "../include/library_loader.h"
#include <dlfcn.h>
#include <stdbool.h>
#include <stdio.h>
/* Looks up |name| in |handle| like dlsym, logging to stderr when the symbol is
 * missing ("error" when |required|, "warning" otherwise). Returns NULL on failure. */
void* dlsym_print_fail(void *handle, const char *name, bool required) {
    dlerror(); /* clear any stale error state before the lookup */
    void *sym = dlsym(handle, name);
    const char *err_str = dlerror();
    if(!sym)
        fprintf(stderr, "%s: dlsym(handle, \"%s\") failed, error: %s\n", required ? "error" : "warning", name, err_str ? err_str : "(null)");
    return sym;
}
/* Resolves every entry of the null-terminated |dlsyms| list, storing each symbol
 * through entry->func. Returns false if any required symbol was missing. */
bool dlsym_load_list(void *handle, const dlsym_assign *dlsyms) {
    bool all_loaded = true;
    for(const dlsym_assign *entry = dlsyms; entry->func; ++entry) {
        *entry->func = dlsym_print_fail(handle, entry->name, true);
        if(!*entry->func)
            all_loaded = false;
    }
    return all_loaded;
}
/* Resolves every entry of the null-terminated |dlsyms| list like dlsym_load_list,
 * but missing symbols only produce a warning and leave the target NULL. */
void dlsym_load_list_optional(void *handle, const dlsym_assign *dlsyms) {
    for(const dlsym_assign *entry = dlsyms; entry->func; ++entry)
        *entry->func = dlsym_print_fail(handle, entry->name, false);
}

4629
src/main.cpp Normal file

File diff suppressed because it is too large Load Diff

279
src/overclock.c Normal file
View File

@ -0,0 +1,279 @@
#include "../include/overclock.h"
#include <X11/Xlib.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// HACK!!!: When a program uses cuda (including nvenc) then the nvidia driver drops to max performance level - 1 (memory transfer rate is dropped and possibly graphics clock).
// Nvidia does this because in some very extreme cases of cuda there can be memory corruption when running at max memory transfer rate.
// So to get around this we overclock memory transfer rate (maybe this should also be done for graphics clock?) to the best performance level while GPU Screen Recorder is running.
/* Returns the smaller of two integers (|a| on ties). */
static int min_int(int a, int b) {
    if(b < a)
        return b;
    return a;
}
// One gpu performance level parsed from the NV_CTRL_STRING_PERFORMANCE_MODES string.
// Fields are 0 if not set
typedef struct {
    int perf;                  // performance level index ("perf=" attribute)
    int nv_clock;              // "nvclock" attribute (graphics clock — presumably MHz, per driver docs)
    int nv_clock_min;          // "nvclockmin"
    int nv_clock_max;          // "nvclockmax"
    int mem_clock;             // "memclock"
    int mem_clock_min;         // "memclockmin"
    int mem_clock_max;         // "memclockmax"
    int mem_transfer_rate;     // "memTransferRate"
    int mem_transfer_rate_min; // "memTransferRatemin"
    int mem_transfer_rate_max; // "memTransferRatemax"
} NVCTRLPerformanceLevel;

// Fixed capacity for parsed performance levels; extra levels are dropped.
#define MAX_PERFORMANCE_LEVELS 12
typedef struct {
    NVCTRLPerformanceLevel performance_level[MAX_PERFORMANCE_LEVELS];
    int num_performance_levels;
} NVCTRLPerformanceLevelQuery;
typedef void (*split_callback)(const char *str, size_t size, void *userdata);

/* Invokes |callback| once per |delimiter|-separated token of |str| (not NUL-terminated,
 * |size| bytes). Consecutive delimiters yield empty tokens; a trailing delimiter does not. */
static void split_by_delimiter(const char *str, size_t size, char delimiter, split_callback callback, void *userdata) {
    const char *end = str + size;
    const char *token_start = str;
    while(token_start < end) {
        const char *token_end = memchr(token_start, delimiter, end - token_start);
        if(!token_end)
            token_end = end;
        callback(token_start, token_end - token_start, userdata);
        token_start = token_end + 1; /* step past the delimiter */
    }
}
/* Attribute kinds this module can offset (overclock) through xnvctrl. */
typedef enum {
    NVCTRL_GPU_NVCLOCK,                  /* graphics clock offset */
    NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE, /* memory transfer rate offset */
} NvCTRLAttributeType;
/* Maps an attribute type to the per-performance-level NV-CONTROL offset attribute; 0 if unknown. */
static unsigned int attribute_type_to_attribute_param(NvCTRLAttributeType attribute_type) {
    if(attribute_type == NVCTRL_GPU_NVCLOCK)
        return NV_CTRL_GPU_NVCLOCK_OFFSET;
    if(attribute_type == NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE)
        return NV_CTRL_GPU_MEM_TRANSFER_RATE_OFFSET;
    return 0;
}
/* Maps an attribute type to the "all performance levels" NV-CONTROL offset attribute; 0 if unknown. */
static unsigned int attribute_type_to_attribute_param_all_levels(NvCTRLAttributeType attribute_type) {
    if(attribute_type == NVCTRL_GPU_NVCLOCK)
        return NV_CTRL_GPU_NVCLOCK_OFFSET_ALL_PERFORMANCE_LEVELS;
    if(attribute_type == NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE)
        return NV_CTRL_GPU_MEM_TRANSFER_RATE_OFFSET_ALL_PERFORMANCE_LEVELS;
    return 0;
}
// Returns 0 on error
static int xnvctrl_get_attribute_max_value(gsr_xnvctrl *xnvctrl, int num_performance_levels, NvCTRLAttributeType attribute_type) {
    NVCTRLAttributeValidValuesRec valid;
    /* Try the "all performance levels" attribute first. */
    if(xnvctrl->XNVCTRLQueryValidTargetAttributeValues(xnvctrl->display, NV_CTRL_TARGET_TYPE_GPU, 0, 0, attribute_type_to_attribute_param_all_levels(attribute_type), &valid))
        return valid.u.range.max;

    /* Fall back to querying the highest performance level directly. */
    if(num_performance_levels > 0 && xnvctrl->XNVCTRLQueryValidTargetAttributeValues(xnvctrl->display, NV_CTRL_TARGET_TYPE_GPU, 0, num_performance_levels - 1, attribute_type_to_attribute_param(attribute_type), &valid))
        return valid.u.range.max;

    return 0;
}
/* Applies |offset| for |attribute_type| globally and per performance level.
 * Returns true if at least one set call succeeded. */
static bool xnvctrl_set_attribute_offset(gsr_xnvctrl *xnvctrl, int num_performance_levels, int offset, NvCTRLAttributeType attribute_type) {
    // NV_CTRL_GPU_MEM_TRANSFER_RATE_OFFSET_ALL_PERFORMANCE_LEVELS works (or at least used to?) without Xorg running as root
    // so we try that first. NV_CTRL_GPU_MEM_TRANSFER_RATE_OFFSET_ALL_PERFORMANCE_LEVELS also only works with GTX 1000+.
    // TODO: Reverse engineer NVIDIA Xorg driver so we can set this always without root access.
    bool any_succeeded = xnvctrl->XNVCTRLSetTargetAttributeAndGetStatus(xnvctrl->display, NV_CTRL_TARGET_TYPE_GPU, 0, 0, attribute_type_to_attribute_param_all_levels(attribute_type), offset);

    for(int level = 0; level < num_performance_levels; ++level) {
        any_succeeded |= xnvctrl->XNVCTRLSetTargetAttributeAndGetStatus(xnvctrl->display, NV_CTRL_TARGET_TYPE_GPU, 0, level, attribute_type_to_attribute_param(attribute_type), offset);
    }

    return any_succeeded;
}
/* Trims leading and trailing whitespace (space, tab, newline) from the (non
 * NUL-terminated) span *|str| with length *|size|, updating both in place.
 * Bug fixes vs the previous version:
 *   - the trailing loop read start[s_d], one byte PAST the span (out-of-bounds);
 *   - *size was never reduced by the leading-whitespace offset, so callers
 *     copied bytes beyond the stripped token. */
static void strip(const char **str, int *size) {
    const char *start = *str;
    const char *end = start + *size;

    while(start < end && (*start == ' ' || *start == '\t' || *start == '\n'))
        ++start;

    while(end > start && (end[-1] == ' ' || end[-1] == '\t' || end[-1] == '\n'))
        --end;

    *str = start;
    *size = end - start;
}
/* Parses one "name=value" attribute token into the NVCTRLPerformanceLevel passed
 * as |userdata|. Tokens that are too long, have no '=' or a non-numeric value are ignored. */
static void attribute_callback(const char *str, size_t size, void *userdata) {
    if(size > 255 - 1)
        return;

    int token_len = size;
    strip(&str, &token_len);

    char attribute[255];
    memcpy(attribute, str, token_len);
    attribute[token_len] = '\0';

    const char *sep = strchr(attribute, '=');
    if(!sep)
        return;

    const size_t name_len = (size_t)(sep - attribute);
    int attribute_value = 0;
    if(sscanf(sep + 1, "%d", &attribute_value) != 1)
        return;

    NVCTRLPerformanceLevel *performance_level = userdata;
    /* Compare against a string literal, matching both length and content. */
    #define NAME_IS(lit) (name_len == sizeof(lit) - 1 && memcmp(attribute, lit, sizeof(lit) - 1) == 0)
    if(NAME_IS("perf"))
        performance_level->perf = attribute_value;
    else if(NAME_IS("nvclock"))
        performance_level->nv_clock = attribute_value;
    else if(NAME_IS("nvclockmin"))
        performance_level->nv_clock_min = attribute_value;
    else if(NAME_IS("nvclockmax"))
        performance_level->nv_clock_max = attribute_value;
    else if(NAME_IS("memclock"))
        performance_level->mem_clock = attribute_value;
    else if(NAME_IS("memclockmin"))
        performance_level->mem_clock_min = attribute_value;
    else if(NAME_IS("memclockmax"))
        performance_level->mem_clock_max = attribute_value;
    else if(NAME_IS("memTransferRate"))
        performance_level->mem_transfer_rate = attribute_value;
    else if(NAME_IS("memTransferRatemin"))
        performance_level->mem_transfer_rate_min = attribute_value;
    else if(NAME_IS("memTransferRatemax"))
        performance_level->mem_transfer_rate_max = attribute_value;
    #undef NAME_IS
}
/* Called once per ';'-separated performance level string; parses its ','-separated
 * attributes into the next free slot of the query. Levels beyond capacity are dropped. */
static void attribute_line_callback(const char *str, size_t size, void *userdata) {
    NVCTRLPerformanceLevelQuery *query = userdata;
    if(query->num_performance_levels >= MAX_PERFORMANCE_LEVELS)
        return;

    NVCTRLPerformanceLevel *level = &query->performance_level[query->num_performance_levels++];
    memset(level, 0, sizeof(*level));
    split_by_delimiter(str, size, ',', attribute_callback, level);
}
/* Queries the gpu's performance modes string from the driver and parses it into |query|.
 * Returns false when the string attribute can't be queried. */
static bool xnvctrl_get_performance_levels(gsr_xnvctrl *xnvctrl, NVCTRLPerformanceLevelQuery *query) {
    memset(query, 0, sizeof(NVCTRLPerformanceLevelQuery));

    char *attributes = NULL;
    if(!xnvctrl->XNVCTRLQueryTargetStringAttribute(xnvctrl->display, NV_CTRL_TARGET_TYPE_GPU, 0, 0, NV_CTRL_STRING_PERFORMANCE_MODES, &attributes)) {
        /* Defensive: free in case the failed query still allocated. */
        if(attributes)
            XFree(attributes);
        return false;
    }

    split_by_delimiter(attributes, strlen(attributes), ';', attribute_line_callback, query);
    XFree(attributes);
    return true;
}
static int compare_mem_transfer_rate_max_asc(const void *a, const void *b) {
const NVCTRLPerformanceLevel *perf_a = a;
const NVCTRLPerformanceLevel *perf_b = b;
return perf_a->mem_transfer_rate_max - perf_b->mem_transfer_rate_max;
}
bool gsr_overclock_load(gsr_overclock *self, Display *display) {
memset(self, 0, sizeof(gsr_overclock));
self->num_performance_levels = 0;
return gsr_xnvctrl_load(&self->xnvctrl, display);
}
void gsr_overclock_unload(gsr_overclock *self) {
gsr_xnvctrl_unload(&self->xnvctrl);
}
bool gsr_overclock_start(gsr_overclock *self) {
int basep = 0;
int errorp = 0;
if(!self->xnvctrl.XNVCTRLQueryExtension(self->xnvctrl.display, &basep, &errorp)) {
fprintf(stderr, "gsr warning: gsr_overclock_start: xnvctrl is not supported on your system, failed to overclock memory transfer rate\n");
return false;
}
NVCTRLPerformanceLevelQuery query;
if(!xnvctrl_get_performance_levels(&self->xnvctrl, &query) || query.num_performance_levels == 0) {
fprintf(stderr, "gsr warning: gsr_overclock_start: failed to get performance levels for overclocking\n");
return false;
}
self->num_performance_levels = query.num_performance_levels;
qsort(query.performance_level, query.num_performance_levels, sizeof(NVCTRLPerformanceLevel), compare_mem_transfer_rate_max_asc);
int target_transfer_rate_offset = xnvctrl_get_attribute_max_value(&self->xnvctrl, query.num_performance_levels, NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE);
if(query.num_performance_levels > 1) {
const int transfer_rate_max_diff = query.performance_level[query.num_performance_levels - 1].mem_transfer_rate_max - query.performance_level[query.num_performance_levels - 2].mem_transfer_rate_max;
target_transfer_rate_offset = min_int(target_transfer_rate_offset, transfer_rate_max_diff);
if(target_transfer_rate_offset >= 0 && xnvctrl_set_attribute_offset(&self->xnvctrl, self->num_performance_levels, target_transfer_rate_offset, NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE)) {
fprintf(stderr, "gsr info: gsr_overclock_start: successfully set memory transfer rate offset to %d\n", target_transfer_rate_offset);
} else {
fprintf(stderr, "gsr info: gsr_overclock_start: failed to overclock memory transfer rate offset to %d\n", target_transfer_rate_offset);
}
}
// TODO: Sort by nv_clock_max
// TODO: Enable. Crashes on my system (gtx 1080) so it's disabled for now. Seems to crash even if graphics clock is increasd by 1, let alone 1200
/*
int target_nv_clock_offset = xnvctrl_get_attribute_max_value(&self->xnvctrl, query.num_performance_levels, NVCTRL_GPU_NVCLOCK);
if(query.num_performance_levels > 1) {
const int nv_clock_max_diff = query.performance_level[query.num_performance_levels - 1].nv_clock_max - query.performance_level[query.num_performance_levels - 2].nv_clock_max;
target_nv_clock_offset = min_int(target_nv_clock_offset, nv_clock_max_diff);
if(target_nv_clock_offset >= 0 && xnvctrl_set_attribute_offset(&self->xnvctrl, self->num_performance_levels, target_nv_clock_offset, NVCTRL_GPU_NVCLOCK)) {
fprintf(stderr, "gsr info: gsr_overclock_start: successfully set nv clock offset to %d\n", target_nv_clock_offset);
} else {
fprintf(stderr, "gsr info: gsr_overclock_start: failed to overclock nv clock offset to %d\n", target_nv_clock_offset);
}
}
*/
XSync(self->xnvctrl.display, False);
return true;
}
void gsr_overclock_stop(gsr_overclock *self) {
xnvctrl_set_attribute_offset(&self->xnvctrl, self->num_performance_levels, 0, NVCTRL_ATTRIB_GPU_MEM_TRANSFER_RATE);
//xnvctrl_set_attribute_offset(&self->xnvctrl, self->num_performance_levels, 0, NVCTRL_GPU_NVCLOCK);
XSync(self->xnvctrl.display, False);
}

864
src/pipewire_audio.c Normal file
View File

@ -0,0 +1,864 @@
#include "../include/pipewire_audio.h"
#include <pipewire/pipewire.h>
#include <pipewire/extensions/metadata.h>
#include <pipewire/impl-module.h>
typedef struct {
const gsr_pipewire_audio_port *output_port;
const gsr_pipewire_audio_port *input_port;
} gsr_pipewire_audio_desired_link;
static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
gsr_pipewire_audio *self = user_data;
//fprintf(stderr, "server name: %s\n", info->name);
}
static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
gsr_pipewire_audio *self = user_data;
//fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
pw_thread_loop_signal(self->thread_loop, false);
}
static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
gsr_pipewire_audio *self = user_data;
if(id == PW_ID_CORE && self->server_version_sync == seq)
pw_thread_loop_signal(self->thread_loop, false);
}
static const struct pw_core_events core_events = {
PW_VERSION_CORE_EVENTS,
.info = on_core_info_cb,
.done = on_core_done_cb,
.error = on_core_error_cb,
};
static gsr_pipewire_audio_node* gsr_pipewire_audio_get_node_by_name_case_insensitive(gsr_pipewire_audio *self, const char *node_name, gsr_pipewire_audio_node_type node_type) {
for(size_t i = 0; i < self->num_stream_nodes; ++i) {
const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
if(node->type == node_type && strcasecmp(node->name, node_name) == 0)
return &self->stream_nodes[i];
}
return NULL;
}
static gsr_pipewire_audio_port* gsr_pipewire_audio_get_node_port_by_name(gsr_pipewire_audio *self, uint32_t node_id, const char *port_name) {
for(size_t i = 0; i < self->num_ports; ++i) {
if(self->ports[i].node_id == node_id && strcmp(self->ports[i].name, port_name) == 0)
return &self->ports[i];
}
return NULL;
}
static bool requested_link_matches_name_case_insensitive(const gsr_pipewire_audio_requested_link *requested_link, const char *name) {
for(int i = 0; i < requested_link->num_outputs; ++i) {
if(requested_link->outputs[i].type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD && strcasecmp(requested_link->outputs[i].name, name) == 0)
return true;
}
return false;
}
static bool requested_link_matches_name_case_insensitive_any_type(const gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link, const char *name) {
for(int i = 0; i < requested_link->num_outputs; ++i) {
switch(requested_link->outputs[i].type) {
case GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD: {
if(strcasecmp(requested_link->outputs[i].name, name) == 0)
return true;
break;
}
case GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT: {
if(strcasecmp(self->default_output_device_name, name) == 0)
return true;
break;
}
case GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT: {
if(strcasecmp(self->default_input_device_name, name) == 0)
return true;
break;
}
}
}
return false;
}
static bool requested_link_has_type(const gsr_pipewire_audio_requested_link *requested_link, gsr_pipewire_audio_requested_type type) {
for(int i = 0; i < requested_link->num_outputs; ++i) {
if(requested_link->outputs[i].type == type)
return true;
}
return false;
}
static void gsr_pipewire_get_node_input_port_by_type(gsr_pipewire_audio *self, const gsr_pipewire_audio_node *input_node, gsr_pipewire_audio_link_input_type input_type,
const gsr_pipewire_audio_port **input_fl_port, const gsr_pipewire_audio_port **input_fr_port)
{
*input_fl_port = NULL;
*input_fr_port = NULL;
switch(input_type) {
case GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM: {
*input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, input_node->id, "input_FL");
*input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, input_node->id, "input_FR");
break;
}
case GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK: {
*input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, input_node->id, "playback_FL");
*input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, input_node->id, "playback_FR");
break;
}
}
}
static bool string_starts_with(const char *str, const char *substr) {
const int len = strlen(str);
const int substr_len = strlen(substr);
return len >= substr_len && memcmp(str, substr, substr_len) == 0;
}
static bool string_ends_with(const char *str, const char *substr) {
const int len = strlen(str);
const int substr_len = strlen(substr);
return len >= substr_len && memcmp(str + len - substr_len, substr, substr_len) == 0;
}
/* Returns number of desired links */
static size_t gsr_pipewire_get_node_output_ports(gsr_pipewire_audio *self, const gsr_pipewire_audio_node *output_node,
gsr_pipewire_audio_desired_link *desired_links, size_t desired_links_max_size,
const gsr_pipewire_audio_port *input_fl_port, const gsr_pipewire_audio_port *input_fr_port)
{
size_t num_desired_links = 0;
for(size_t i = 0; i < self->num_ports && num_desired_links < desired_links_max_size; ++i) {
if(self->ports[i].node_id != output_node->id)
continue;
if(string_starts_with(self->ports[i].name, "playback_"))
continue;
if(string_ends_with(self->ports[i].name, "_MONO") || string_ends_with(self->ports[i].name, "_FC") || string_ends_with(self->ports[i].name, "_LFE")) {
if(num_desired_links + 2 >= desired_links_max_size)
break;
desired_links[num_desired_links + 0] = (gsr_pipewire_audio_desired_link){ .output_port = &self->ports[i], .input_port = input_fl_port };
desired_links[num_desired_links + 1] = (gsr_pipewire_audio_desired_link){ .output_port = &self->ports[i], .input_port = input_fr_port };
num_desired_links += 2;
} else if(string_ends_with(self->ports[i].name, "_FL") || string_ends_with(self->ports[i].name, "_RL") || string_ends_with(self->ports[i].name, "_SL")) {
if(num_desired_links + 1 >= desired_links_max_size)
break;
desired_links[num_desired_links] = (gsr_pipewire_audio_desired_link){ .output_port = &self->ports[i], .input_port = input_fl_port };
num_desired_links += 1;
} else if(string_ends_with(self->ports[i].name, "_FR") || string_ends_with(self->ports[i].name, "_RR") || string_ends_with(self->ports[i].name, "_SR")) {
if(num_desired_links + 1 >= desired_links_max_size)
break;
desired_links[num_desired_links] = (gsr_pipewire_audio_desired_link){ .output_port = &self->ports[i], .input_port = input_fr_port };
num_desired_links += 1;
}
}
return num_desired_links;
}
static void gsr_pipewire_audio_establish_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_port *output_port, const gsr_pipewire_audio_port *input_port) {
// TODO: Detect if link already exists before so we dont create these proxies when not needed.
// We could do that by saving which nodes have been linked with which nodes after linking them.
//fprintf(stderr, "linking!\n");
// TODO: error check and cleanup
struct pw_properties *props = pw_properties_new(NULL, NULL);
pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", output_port->id);
pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", input_port->id);
// TODO: Clean this up when removing node
struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
//self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
pw_properties_free(props);
}
static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link) {
const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->input_name, requested_link_node_type);
if(!stream_input_node)
return;
const gsr_pipewire_audio_port *input_fl_port = NULL;
const gsr_pipewire_audio_port *input_fr_port = NULL;
gsr_pipewire_get_node_input_port_by_type(self, stream_input_node, requested_link->input_type, &input_fl_port, &input_fr_port);
if(!input_fl_port || !input_fr_port)
return;
gsr_pipewire_audio_desired_link desired_links[64];
for(size_t i = 0; i < self->num_stream_nodes; ++i) {
const gsr_pipewire_audio_node *output_node = &self->stream_nodes[i];
if(output_node->type != requested_link->output_type)
continue;
const bool requested_link_matches_app = requested_link_matches_name_case_insensitive_any_type(self, requested_link, output_node->name);
if(requested_link->inverted) {
if(requested_link_matches_app)
continue;
} else {
if(!requested_link_matches_app)
continue;
}
const size_t num_desired_links = gsr_pipewire_get_node_output_ports(self, output_node, desired_links, 64, input_fl_port, input_fr_port);
for(size_t j = 0; j < num_desired_links; ++j) {
gsr_pipewire_audio_establish_link(self, desired_links[j].output_port, desired_links[j].input_port);
}
}
}
static void gsr_pipewire_audio_create_links(gsr_pipewire_audio *self) {
for(size_t i = 0; i < self->num_requested_links; ++i) {
gsr_pipewire_audio_create_link(self, &self->requested_links[i]);
}
}
static void gsr_pipewire_audio_create_link_for_default_devices(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link, gsr_pipewire_audio_requested_type default_device_type) {
if(default_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD)
return;
const char *device_name = default_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT ? self->default_output_device_name : self->default_input_device_name;
if(device_name[0] == '\0')
return;
if(!requested_link_has_type(requested_link, default_device_type))
return;
const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->input_name, requested_link_node_type);
if(!stream_input_node)
return;
const gsr_pipewire_audio_port *input_fl_port = NULL;
const gsr_pipewire_audio_port *input_fr_port = NULL;
gsr_pipewire_get_node_input_port_by_type(self, stream_input_node, requested_link->input_type, &input_fl_port, &input_fr_port);
if(!input_fl_port || !input_fr_port)
return;
const gsr_pipewire_audio_node *stream_output_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, device_name, GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE);
if(!stream_output_node)
return;
gsr_pipewire_audio_desired_link desired_links[64];
const size_t num_desired_links = gsr_pipewire_get_node_output_ports(self, stream_output_node, desired_links, 64, input_fl_port, input_fr_port);
for(size_t i = 0; i < num_desired_links; ++i) {
gsr_pipewire_audio_establish_link(self, desired_links[i].output_port, desired_links[i].input_port);
}
}
static void gsr_pipewire_audio_create_links_for_default_devices(gsr_pipewire_audio *self, gsr_pipewire_audio_requested_type default_device_type) {
for(size_t i = 0; i < self->num_requested_links; ++i) {
gsr_pipewire_audio_create_link_for_default_devices(self, &self->requested_links[i], default_device_type);
}
}
static void gsr_pipewire_audio_destroy_links_by_output_to_input(gsr_pipewire_audio *self, uint32_t output_node_id, uint32_t input_node_id) {
for(size_t i = 0; i < self->num_links; ++i) {
if(self->links[i].output_node_id == output_node_id && self->links[i].input_node_id == input_node_id)
pw_registry_destroy(self->registry, self->links[i].id);
}
}
static void gsr_pipewire_destroy_default_device_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link, gsr_pipewire_audio_requested_type default_device_type) {
if(default_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD)
return;
const char *device_name = default_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT ? self->default_output_device_name : self->default_input_device_name;
if(device_name[0] == '\0')
return;
if(!requested_link_has_type(requested_link, default_device_type))
return;
/* default_output and default_input can be the same device. In that case both are the same link and we dont want to remove the link */
const gsr_pipewire_audio_requested_type opposite_device_type = default_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT ? GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT : GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT;
const char *opposite_device_name = opposite_device_type == GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT ? self->default_output_device_name : self->default_input_device_name;
if(requested_link_has_type(requested_link, opposite_device_type) && strcmp(device_name, opposite_device_name) == 0)
return;
const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->input_name, requested_link_node_type);
if(!stream_input_node)
return;
const gsr_pipewire_audio_node *stream_output_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, device_name, GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE);
if(!stream_output_node)
return;
if(requested_link_matches_name_case_insensitive(requested_link, stream_output_node->name))
return;
gsr_pipewire_audio_destroy_links_by_output_to_input(self, stream_output_node->id, stream_input_node->id);
//fprintf(stderr, "destroying a link from %u to %u\n", stream_output_node->id, stream_input_node->id);
}
static void gsr_pipewire_destroy_default_device_links(gsr_pipewire_audio *self, gsr_pipewire_audio_requested_type default_device_type) {
for(size_t i = 0; i < self->num_requested_links; ++i) {
gsr_pipewire_destroy_default_device_link(self, &self->requested_links[i], default_device_type);
}
}
static bool json_get_value(const char *json_str, const char *key, char *value, size_t value_size) {
char key_full[32];
const int key_full_size = snprintf(key_full, sizeof(key_full), "\"%s\":", key);
const char *start = strstr(json_str, key_full);
if(!start)
return false;
start += key_full_size;
const char *value_start = strchr(start, '"');
if(!value_start)
return false;
value_start += 1;
const char *value_end = strchr(value_start, '"');
if(!value_end)
return false;
snprintf(value, value_size, "%.*s", (int)(value_end - value_start), value_start);
return true;
}
static int on_metadata_property_cb(void *data, uint32_t id, const char *key, const char *type, const char *value) {
(void)type;
gsr_pipewire_audio *self = data;
if(id == PW_ID_CORE && key && value) {
char value_decoded[128];
if(strcmp(key, "default.audio.sink") == 0) {
if(json_get_value(value, "name", value_decoded, sizeof(value_decoded)) && strcmp(value_decoded, self->default_output_device_name) != 0) {
gsr_pipewire_destroy_default_device_links(self, GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT);
snprintf(self->default_output_device_name, sizeof(self->default_output_device_name), "%s", value_decoded);
gsr_pipewire_audio_create_links_for_default_devices(self, GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT);
}
} else if(strcmp(key, "default.audio.source") == 0) {
if(json_get_value(value, "name", value_decoded, sizeof(value_decoded)) && strcmp(value_decoded, self->default_input_device_name) != 0) {
gsr_pipewire_destroy_default_device_links(self, GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT);
snprintf(self->default_input_device_name, sizeof(self->default_input_device_name), "%s", value_decoded);
gsr_pipewire_audio_create_links_for_default_devices(self, GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT);
}
}
}
return 0;
}
static const struct pw_metadata_events metadata_events = {
PW_VERSION_METADATA_EVENTS,
.property = on_metadata_property_cb,
};
static void on_metadata_proxy_removed_cb(void *data) {
gsr_pipewire_audio *self = data;
if(self->metadata_proxy) {
pw_proxy_destroy(self->metadata_proxy);
self->metadata_proxy = NULL;
}
}
static void on_metadata_proxy_destroy_cb(void *data) {
gsr_pipewire_audio *self = data;
spa_hook_remove(&self->metadata_listener);
spa_hook_remove(&self->metadata_proxy_listener);
spa_zero(self->metadata_listener);
spa_zero(self->metadata_proxy_listener);
self->metadata_proxy = NULL;
}
static const struct pw_proxy_events metadata_proxy_events = {
PW_VERSION_PROXY_EVENTS,
.removed = on_metadata_proxy_removed_cb,
.destroy = on_metadata_proxy_destroy_cb,
};
static bool gsr_pipewire_audio_listen_on_metadata(gsr_pipewire_audio *self, uint32_t id) {
if(self->metadata_proxy) {
pw_proxy_destroy(self->metadata_proxy);
self->metadata_proxy = NULL;
}
self->metadata_proxy = pw_registry_bind(self->registry, id, PW_TYPE_INTERFACE_Metadata, PW_VERSION_METADATA, 0);
if(!self->metadata_proxy) {
fprintf(stderr, "gsr error: gsr_pipewire_audio_listen_on_metadata: failed to bind to registry\n");
return false;
}
pw_proxy_add_object_listener(self->metadata_proxy, &self->metadata_listener, &metadata_events, self);
pw_proxy_add_listener(self->metadata_proxy, &self->metadata_proxy_listener, &metadata_proxy_events, self);
self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
return true;
}
static bool array_ensure_capacity(void **array, size_t size, size_t *capacity_items, size_t element_size) {
if(size + 1 >= *capacity_items) {
size_t new_capacity_items = *capacity_items * 2;
if(new_capacity_items == 0)
new_capacity_items = 32;
void *new_data = realloc(*array, new_capacity_items * element_size);
if(!new_data) {
fprintf(stderr, "gsr error: pipewire_audio: failed to reallocate memory\n");
return false;
}
*array = new_data;
*capacity_items = new_capacity_items;
}
return true;
}
static void registry_event_global(void *data, uint32_t id, uint32_t permissions,
const char *type, uint32_t version,
const struct spa_dict *props)
{
//fprintf(stderr, "add: id: %d, type: %s\n", (int)id, type);
gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
if(!props || !type || !self->running)
return;
//pw_properties_new_dict(props);
if(strcmp(type, PW_TYPE_INTERFACE_Node) == 0) {
const char *node_name = spa_dict_lookup(props, PW_KEY_NODE_NAME);
const char *media_class = spa_dict_lookup(props, PW_KEY_MEDIA_CLASS);
//fprintf(stderr, " node id: %u, node name: %s, media class: %s\n", id, node_name, media_class);
const bool is_stream_output = media_class && strcmp(media_class, "Stream/Output/Audio") == 0;
const bool is_stream_input = media_class && strcmp(media_class, "Stream/Input/Audio") == 0;
const bool is_sink = media_class && string_starts_with(media_class, "Audio/Sink"); // Matches Audio/Sink/Virtual as well
const bool is_source = media_class && string_starts_with(media_class, "Audio/Source"); // Matches Audio/Source/Virtual as well
if(node_name && (is_stream_output || is_stream_input || is_sink || is_source)) {
//const char *application_binary = spa_dict_lookup(props, PW_KEY_APP_PROCESS_BINARY);
//const char *application_name = spa_dict_lookup(props, PW_KEY_APP_NAME);
//fprintf(stderr, " node name: %s, app binary: %s, app name: %s\n", node_name, application_binary, application_name);
if(!array_ensure_capacity((void**)&self->stream_nodes, self->num_stream_nodes, &self->stream_nodes_capacity_items, sizeof(gsr_pipewire_audio_node)))
return;
char *node_name_copy = strdup(node_name);
if(node_name_copy) {
self->stream_nodes[self->num_stream_nodes].id = id;
self->stream_nodes[self->num_stream_nodes].name = node_name_copy;
if(is_stream_output)
self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT;
else if(is_stream_input)
self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT;
else if(is_sink || is_source)
self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
++self->num_stream_nodes;
gsr_pipewire_audio_create_links(self);
}
}
} else if(strcmp(type, PW_TYPE_INTERFACE_Port) == 0) {
const char *port_name = spa_dict_lookup(props, PW_KEY_PORT_NAME);
const char *port_direction = spa_dict_lookup(props, PW_KEY_PORT_DIRECTION);
gsr_pipewire_audio_port_direction direction = -1;
if(port_direction && strcmp(port_direction, "in") == 0)
direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_INPUT;
else if(port_direction && strcmp(port_direction, "out") == 0)
direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_OUTPUT;
const char *node_id = spa_dict_lookup(props, PW_KEY_NODE_ID);
const int node_id_num = node_id ? atoi(node_id) : 0;
if(port_name && direction >= 0 && node_id_num > 0) {
if(!array_ensure_capacity((void**)&self->ports, self->num_ports, &self->ports_capacity_items, sizeof(gsr_pipewire_audio_port)))
return;
//fprintf(stderr, " port name: %s, node id: %d, direction: %s\n", port_name, node_id_num, port_direction);
char *port_name_copy = strdup(port_name);
if(port_name_copy) {
//fprintf(stderr, " port id: %u, node id: %u, name: %s\n", id, node_id_num, port_name_copy);
self->ports[self->num_ports].id = id;
self->ports[self->num_ports].node_id = node_id_num;
self->ports[self->num_ports].direction = direction;
self->ports[self->num_ports].name = port_name_copy;
++self->num_ports;
gsr_pipewire_audio_create_links(self);
}
}
} else if(strcmp(type, PW_TYPE_INTERFACE_Link) == 0) {
const char *output_node = spa_dict_lookup(props, PW_KEY_LINK_OUTPUT_NODE);
const char *input_node = spa_dict_lookup(props, PW_KEY_LINK_INPUT_NODE);
const uint32_t output_node_id_num = output_node ? atoi(output_node) : 0;
const uint32_t input_node_id_num = input_node ? atoi(input_node) : 0;
if(output_node_id_num > 0 && input_node_id_num > 0) {
if(!array_ensure_capacity((void**)&self->links, self->num_links, &self->links_capacity_items, sizeof(gsr_pipewire_audio_link)))
return;
//fprintf(stderr, " new link (%u): %u -> %u\n", id, output_node_id_num, input_node_id_num);
self->links[self->num_links].id = id;
self->links[self->num_links].output_node_id = output_node_id_num;
self->links[self->num_links].input_node_id = input_node_id_num;
++self->num_links;
}
} else if(strcmp(type, PW_TYPE_INTERFACE_Metadata) == 0) {
const char *name = spa_dict_lookup(props, PW_KEY_METADATA_NAME);
if(name && strcmp(name, "default") == 0)
gsr_pipewire_audio_listen_on_metadata(self, id);
}
}
static bool gsr_pipewire_audio_remove_node_by_id(gsr_pipewire_audio *self, uint32_t node_id) {
for(size_t i = 0; i < self->num_stream_nodes; ++i) {
if(self->stream_nodes[i].id != node_id)
continue;
free(self->stream_nodes[i].name);
self->stream_nodes[i] = self->stream_nodes[self->num_stream_nodes - 1];
--self->num_stream_nodes;
return true;
}
return false;
}
static bool gsr_pipewire_audio_remove_port_by_id(gsr_pipewire_audio *self, uint32_t port_id) {
for(size_t i = 0; i < self->num_ports; ++i) {
if(self->ports[i].id != port_id)
continue;
free(self->ports[i].name);
self->ports[i] = self->ports[self->num_ports - 1];
--self->num_ports;
return true;
}
return false;
}
static bool gsr_pipewire_audio_remove_link_by_id(gsr_pipewire_audio *self, uint32_t link_id) {
for(size_t i = 0; i < self->num_links; ++i) {
if(self->links[i].id != link_id)
continue;
self->links[i] = self->links[self->num_links - 1];
--self->num_links;
return true;
}
return false;
}
static void registry_event_global_remove(void *data, uint32_t id) {
//fprintf(stderr, "remove: %d\n", (int)id);
gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
if(gsr_pipewire_audio_remove_node_by_id(self, id)) {
//fprintf(stderr, "removed node\n");
return;
}
if(gsr_pipewire_audio_remove_port_by_id(self, id)) {
//fprintf(stderr, "removed port\n");
return;
}
if(gsr_pipewire_audio_remove_link_by_id(self, id)) {
//fprintf(stderr, "removed link\n");
return;
}
}
static const struct pw_registry_events registry_events = {
PW_VERSION_REGISTRY_EVENTS,
.global = registry_event_global,
.global_remove = registry_event_global_remove,
};
bool gsr_pipewire_audio_init(gsr_pipewire_audio *self) {
memset(self, 0, sizeof(*self));
self->running = true;
pw_init(NULL, NULL);
self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
if(!self->thread_loop) {
fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire thread\n");
gsr_pipewire_audio_deinit(self);
return false;
}
self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
if(!self->context) {
fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire context\n");
gsr_pipewire_audio_deinit(self);
return false;
}
pw_context_load_module(self->context, "libpipewire-module-link-factory", NULL, NULL);
if(pw_thread_loop_start(self->thread_loop) < 0) {
fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to start thread\n");
gsr_pipewire_audio_deinit(self);
return false;
}
pw_thread_loop_lock(self->thread_loop);
self->core = pw_context_connect(self->context, pw_properties_new(PW_KEY_REMOTE_NAME, NULL, NULL), 0);
if(!self->core) {
pw_thread_loop_unlock(self->thread_loop);
gsr_pipewire_audio_deinit(self);
return false;
}
// TODO: Error check
pw_core_add_listener(self->core, &self->core_listener, &core_events, self);
self->registry = pw_core_get_registry(self->core, PW_VERSION_REGISTRY, 0);
pw_registry_add_listener(self->registry, &self->registry_listener, &registry_events, self);
self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
pw_thread_loop_wait(self->thread_loop);
pw_thread_loop_unlock(self->thread_loop);
return true;
}
static gsr_pipewire_audio_link* gsr_pipewire_audio_get_first_link_to_node(gsr_pipewire_audio *self, uint32_t node_id) {
for(size_t i = 0; i < self->num_links; ++i) {
if(self->links[i].input_node_id == node_id)
return &self->links[i];
}
return NULL;
}
static void gsr_pipewire_audio_destroy_requested_links(gsr_pipewire_audio *self) {
pw_thread_loop_lock(self->thread_loop);
self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
pw_thread_loop_wait(self->thread_loop);
for(size_t requested_link_index = 0; requested_link_index < self->num_requested_links; ++requested_link_index) {
const gsr_pipewire_audio_node_type requested_link_node_type = self->requested_links[requested_link_index].input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, self->requested_links[requested_link_index].input_name, requested_link_node_type);
if(!stream_input_node)
continue;
for(;;) {
gsr_pipewire_audio_link *link = gsr_pipewire_audio_get_first_link_to_node(self, stream_input_node->id);
if(!link)
break;
pw_registry_destroy(self->registry, link->id);
self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
pw_thread_loop_wait(self->thread_loop);
usleep(10 * 1000);
}
}
pw_thread_loop_unlock(self->thread_loop);
}
/* Tears down the pipewire audio context. Stops the thread loop first (after
 * destroying links), then removes listeners/proxies, disconnects the core,
 * destroys the context and loop, and finally frees all tracked node/port/link
 * state. Safe to call after a partially failed init: every step is guarded. */
void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self) {
    self->running = false;

    if(self->thread_loop) {
        /* We need to manually destroy links first, otherwise the linked audio sources will be paused when closing the program */
        gsr_pipewire_audio_destroy_requested_links(self);
        //pw_thread_loop_wait(self->thread_loop);
        pw_thread_loop_stop(self->thread_loop);
    }

    if(self->metadata_proxy) {
        spa_hook_remove(&self->metadata_listener);
        spa_hook_remove(&self->metadata_proxy_listener);
        pw_proxy_destroy(self->metadata_proxy);
        spa_zero(self->metadata_listener);
        spa_zero(self->metadata_proxy_listener);
        self->metadata_proxy = NULL;
    }

    spa_hook_remove(&self->registry_listener);
    spa_hook_remove(&self->core_listener);

    /* Core must be disconnected before the context is destroyed, and the
       context before the loop — reverse of creation order. */
    if(self->core) {
        pw_core_disconnect(self->core);
        self->core = NULL;
    }

    if(self->context) {
        pw_context_destroy(self->context);
        self->context = NULL;
    }

    if(self->thread_loop) {
        pw_thread_loop_destroy(self->thread_loop);
        self->thread_loop = NULL;
    }

    /* From here on the loop thread is gone, so the arrays below can be freed
       without locking. */
    if(self->stream_nodes) {
        for(size_t i = 0; i < self->num_stream_nodes; ++i) {
            free(self->stream_nodes[i].name);
        }
        self->num_stream_nodes = 0;
        self->stream_nodes_capacity_items = 0;
        free(self->stream_nodes);
        self->stream_nodes = NULL;
    }

    if(self->ports) {
        for(size_t i = 0; i < self->num_ports; ++i) {
            free(self->ports[i].name);
        }
        self->num_ports = 0;
        self->ports_capacity_items = 0;
        free(self->ports);
        self->ports = NULL;
    }

    if(self->links) {
        self->num_links = 0;
        self->links_capacity_items = 0;
        free(self->links);
        self->links = NULL;
    }

    if(self->requested_links) {
        for(size_t i = 0; i < self->num_requested_links; ++i) {
            for(int j = 0; j < self->requested_links[i].num_outputs; ++j) {
                free(self->requested_links[i].outputs[j].name);
            }
            free(self->requested_links[i].outputs);
            free(self->requested_links[i].input_name);
        }
        self->num_requested_links = 0;
        self->requested_links_capacity_items = 0;
        free(self->requested_links);
        self->requested_links = NULL;
    }

    #if PW_CHECK_VERSION(0, 3, 49)
    pw_deinit();
    #endif
}
/* Removes |suffix| from the end of |str| in-place.
 * Returns true if |str| ended with |suffix| (and it was removed), false otherwise.
 * Fix: use size_t for the lengths instead of int — strlen returns size_t and
 * the previous implicit narrowing could truncate for very long strings. */
static bool string_remove_suffix(char *str, const char *suffix) {
    const size_t str_len = strlen(str);
    const size_t suffix_len = strlen(suffix);
    if(str_len >= suffix_len && memcmp(str + str_len - suffix_len, suffix, suffix_len) == 0) {
        str[str_len - suffix_len] = '\0';
        return true;
    } else {
        return false;
    }
}
/* Registers a requested link from |num_output_names| outputs (apps/sources)
 * to a single input (stream/sink), then immediately tries to create the
 * pipewire links. All names are copied; ownership stays with |self| (freed in
 * deinit). The ".monitor" suffix is stripped so names match pipewire node
 * names. Returns false on allocation failure (no partial state is kept).
 * NOTE(review): array_ensure_capacity may realloc self->requested_links
 * before the thread-loop lock is taken — verify the loop thread cannot be
 * iterating that array concurrently at this point. */
static bool gsr_pipewire_audio_add_links_to_output(gsr_pipewire_audio *self, const char **output_names, int num_output_names, const char *input_name, gsr_pipewire_audio_node_type output_type, gsr_pipewire_audio_link_input_type input_type, bool inverted) {
    if(!array_ensure_capacity((void**)&self->requested_links, self->num_requested_links, &self->requested_links_capacity_items, sizeof(gsr_pipewire_audio_requested_link)))
        return false;

    /* calloc zeroes .name pointers so the error path can free() them all safely */
    gsr_pipewire_audio_requested_output *outputs = calloc(num_output_names, sizeof(gsr_pipewire_audio_requested_output));
    if(!outputs)
        return false;

    char *input_name_copy = strdup(input_name);
    if(!input_name_copy)
        goto error;

    if(input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK)
        string_remove_suffix(input_name_copy, ".monitor");

    for(int i = 0; i < num_output_names; ++i) {
        outputs[i].name = strdup(output_names[i]);
        if(!outputs[i].name)
            goto error;

        outputs[i].type = GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD;
        if(output_type == GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE) {
            string_remove_suffix(outputs[i].name, ".monitor");
            /* "default_output"/"default_input" are virtual names resolved to
               the current default devices via the metadata listener */
            if(strcmp(outputs[i].name, "default_output") == 0)
                outputs[i].type = GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT;
            else if(strcmp(outputs[i].name, "default_input") == 0)
                outputs[i].type = GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT;
            else
                outputs[i].type = GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_STANDARD;
        }
    }

    /* Publish the new entry and attempt link creation under the loop lock */
    pw_thread_loop_lock(self->thread_loop);
    self->requested_links[self->num_requested_links].outputs = outputs;
    self->requested_links[self->num_requested_links].num_outputs = num_output_names;
    self->requested_links[self->num_requested_links].input_name = input_name_copy;
    self->requested_links[self->num_requested_links].output_type = output_type;
    self->requested_links[self->num_requested_links].input_type = input_type;
    self->requested_links[self->num_requested_links].inverted = inverted;
    ++self->num_requested_links;
    gsr_pipewire_audio_create_link(self, &self->requested_links[self->num_requested_links - 1]);
    // TODO: Remove these?
    gsr_pipewire_audio_create_link_for_default_devices(self, &self->requested_links[self->num_requested_links - 1], GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_OUTPUT);
    gsr_pipewire_audio_create_link_for_default_devices(self, &self->requested_links[self->num_requested_links - 1], GSR_PIPEWIRE_AUDIO_REQUESTED_TYPE_DEFAULT_INPUT);
    pw_thread_loop_unlock(self->thread_loop);
    return true;

    error:
    free(input_name_copy);
    for(int i = 0; i < num_output_names; ++i) {
        free(outputs[i].name);
    }
    free(outputs);
    return false;
}
/* Convenience wrappers around gsr_pipewire_audio_add_links_to_output, one per
 * supported (output type, input type, inverted) combination. The "_inverted"
 * variants link every matching output EXCEPT the named ones. */
bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, app_names, num_app_names, stream_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, false);
}

bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, app_names, num_app_names, stream_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, true);
}

bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, app_names, num_app_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, false);
}

bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, app_names, num_app_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, true);
}

bool gsr_pipewire_audio_add_link_from_sources_to_stream(gsr_pipewire_audio *self, const char **source_names, int num_source_names, const char *stream_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, source_names, num_source_names, stream_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, false);
}

bool gsr_pipewire_audio_add_link_from_sources_to_sink(gsr_pipewire_audio *self, const char **source_names, int num_source_names, const char *sink_name_input) {
    return gsr_pipewire_audio_add_links_to_output(self, source_names, num_source_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, false);
}
/* Invokes |callback| once per unique (case-insensitive) application stream
 * name, under the thread-loop lock. Iteration stops early if the callback
 * returns false. */
void gsr_pipewire_audio_for_each_app(gsr_pipewire_audio *self, gsr_pipewire_audio_app_query_callback callback, void *userdata) {
    pw_thread_loop_lock(self->thread_loop);
    const int num_nodes = (int)self->num_stream_nodes;
    for(int i = 0; i < num_nodes; ++i) {
        const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
        if(node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
            continue;

        /* Only report the first occurrence of each app name */
        bool seen_before = false;
        for(int j = 0; j < i && !seen_before; ++j) {
            const gsr_pipewire_audio_node *other = &self->stream_nodes[j];
            seen_before = other->type == GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT && strcasecmp(node->name, other->name) == 0;
        }
        if(seen_before)
            continue;

        if(!callback(node->name, userdata))
            break;
    }
    pw_thread_loop_unlock(self->thread_loop);
}

947
src/pipewire_video.c Normal file
View File

@ -0,0 +1,947 @@
#include "../include/pipewire_video.h"
#include "../include/egl.h"
#include "../include/utils.h"
#include <pipewire/pipewire.h>
#include <spa/param/video/format-utils.h>
#include <spa/debug/types.h>
#include <drm_fourcc.h>
#include <fcntl.h>
#include <unistd.h>
/* This code is partially based on xr-video-player pipewire implementation which is based on obs-studio's pipewire implementation */
/* TODO: Make gsr_pipewire_video_init asynchronous */
/* TODO: Support hdr when pipewire supports it */
/* TODO: Test all of the image formats */
#ifndef SPA_POD_PROP_FLAG_DONT_FIXATE
#define SPA_POD_PROP_FLAG_DONT_FIXATE (1 << 4)
#endif
/* Compatibility shim: pipewire headers older than 0.3.62 lack the video
 * transform metadata API used below, so define the pieces we need here. */
#if !PW_CHECK_VERSION(0, 3, 62)
enum spa_meta_videotransform_value {
    SPA_META_TRANSFORMATION_None = 0, /**< no transform */
    SPA_META_TRANSFORMATION_90, /**< 90 degree counter-clockwise */
    SPA_META_TRANSFORMATION_180, /**< 180 degree counter-clockwise */
    SPA_META_TRANSFORMATION_270, /**< 270 degree counter-clockwise */
    SPA_META_TRANSFORMATION_Flipped, /**< 180 degree flipped around the vertical axis. Equivalent
                                       * to a reflexion through the vertical line splitting the
                                       * buffer in two equal sized parts */
    SPA_META_TRANSFORMATION_Flipped90, /**< flip then rotate around 90 degree counter-clockwise */
    SPA_META_TRANSFORMATION_Flipped180, /**< flip then rotate around 180 degree counter-clockwise */
    SPA_META_TRANSFORMATION_Flipped270, /**< flip then rotate around 270 degree counter-clockwise */
};

/** a transformation of the buffer */
struct spa_meta_videotransform {
    uint32_t transform; /**< orientation transformation that was applied to the buffer,
                          * one of enum spa_meta_videotransform_value */
};

#define SPA_META_VideoTransform 8
#endif
/* Bytes needed for cursor metadata holding a width x height RGBA bitmap
 * (4 bytes per pixel) plus the cursor and bitmap headers.
 * Fix: parenthesize the macro arguments so expression arguments
 * (e.g. CURSOR_META_SIZE(w + 1, h)) expand correctly. */
#define CURSOR_META_SIZE(width, height)                                \
    (sizeof(struct spa_meta_cursor) + sizeof(struct spa_meta_bitmap) + \
     (width) * (height) * 4)
/* Parses a "major.minor.micro" version string into |dst|.
 * Returns false when the string does not contain all three components. */
static bool parse_pw_version(gsr_pipewire_video_data_version *dst, const char *version) {
    return sscanf(version, "%d.%d.%d", &dst->major, &dst->minor, &dst->micro) == 3;
}
/* Returns true when |pw_version| is at least major.minor.micro
 * (lexicographic comparison of the three components). */
static bool check_pw_version(const gsr_pipewire_video_data_version *pw_version, int major, int minor, int micro) {
    if(pw_version->major > major)
        return true;
    if(pw_version->major < major)
        return false;
    if(pw_version->minor > minor)
        return true;
    if(pw_version->minor < minor)
        return false;
    return pw_version->micro >= micro;
}
/* Logs the server/library/header pipewire versions and stores the parsed
 * server version in self->server_version for later feature checks. */
static void update_pw_versions(gsr_pipewire_video *self, const char *version) {
    fprintf(stderr, "gsr info: pipewire: server version: %s\n", version);
    fprintf(stderr, "gsr info: pipewire: library version: %s\n", pw_get_library_version());
    fprintf(stderr, "gsr info: pipewire: header version: %s\n", pw_get_headers_version());
    if(!parse_pw_version(&self->server_version, version))
        fprintf(stderr, "gsr error: pipewire: failed to parse server version\n");
}
/* pw_core info event: record the server's version. */
static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
    gsr_pipewire_video *self = user_data;
    update_pw_versions(self, info->version);
}
/* pw_core error event: log and wake up any pw_thread_loop_wait caller. */
static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
    gsr_pipewire_video *self = user_data;
    fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
    pw_thread_loop_signal(self->thread_loop, false);
}
/* pw_core done event: wake the init path once our version sync round-trip
 * (issued via pw_core_sync in setup_stream) completes. */
static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
    gsr_pipewire_video *self = user_data;
    if (id == PW_ID_CORE && self->server_version_sync == seq)
        pw_thread_loop_signal(self->thread_loop, false);
}
/* Returns true for the cursor bitmap pixel formats this capture path accepts. */
static bool is_cursor_format_supported(const enum spa_video_format format) {
    switch(format) {
        case SPA_VIDEO_FORMAT_RGBx:
        case SPA_VIDEO_FORMAT_BGRx:
        case SPA_VIDEO_FORMAT_RGBA:
        case SPA_VIDEO_FORMAT_BGRA:
        case SPA_VIDEO_FORMAT_RGB:
        case SPA_VIDEO_FORMAT_BGR:
        case SPA_VIDEO_FORMAT_ARGB:
        case SPA_VIDEO_FORMAT_ABGR:
#if PW_CHECK_VERSION(0, 3, 41)
        case SPA_VIDEO_FORMAT_xRGB_210LE:
        case SPA_VIDEO_FORMAT_xBGR_210LE:
        case SPA_VIDEO_FORMAT_ARGB_210LE:
        case SPA_VIDEO_FORMAT_ABGR_210LE:
#endif
            return true;
        default:
            return false;
    }
}
/* pw_core event vtable wired up in gsr_pipewire_video_setup_stream. */
static const struct pw_core_events core_events = {
    PW_VERSION_CORE_EVENTS,
    .info = on_core_info_cb,
    .done = on_core_done_cb,
    .error = on_core_error_cb,
};
/* Stream process event: runs on the pipewire loop thread whenever buffers are
 * ready. Drains the queue keeping only the newest buffer, copies its dmabuf
 * plane fds (dup'd, previous ones closed) and crop/transform/damage/cursor
 * metadata into |self| under self->mutex, then requeues the buffer. */
static void on_process_cb(void *user_data) {
    gsr_pipewire_video *self = user_data;

    /* Find the most recent buffer */
    struct pw_buffer *pw_buf = NULL;
    for(;;) {
        struct pw_buffer *aux = pw_stream_dequeue_buffer(self->stream);
        if(!aux)
            break;
        /* Return every buffer except the newest one straight away */
        if(pw_buf)
            pw_stream_queue_buffer(self->stream, pw_buf);
        pw_buf = aux;
    }

    if(!pw_buf) {
        fprintf(stderr, "gsr info: pipewire: out of buffers!\n");
        return;
    }

    struct spa_buffer *buffer = pw_buf->buffer;
    const bool has_buffer = buffer->datas[0].chunk->size != 0;

    pthread_mutex_lock(&self->mutex);

    bool buffer_updated = false;
    if(has_buffer && buffer->datas[0].type == SPA_DATA_DmaBuf) {
        /* Close the fds dup'd from the previous buffer before taking new ones.
           NOTE(review): fd 0 would be skipped by the `> 0` check — presumably
           never happens in practice; confirm. */
        for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
            if(self->dmabuf_data[i].fd > 0) {
                close(self->dmabuf_data[i].fd);
                self->dmabuf_data[i].fd = -1;
            }
        }

        self->dmabuf_num_planes = buffer->n_datas;
        if(self->dmabuf_num_planes > GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES)
            self->dmabuf_num_planes = GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES;

        /* dup so the fds stay valid after the buffer is requeued */
        for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
            self->dmabuf_data[i].fd = dup(buffer->datas[i].fd);
            self->dmabuf_data[i].offset = buffer->datas[i].chunk->offset;
            self->dmabuf_data[i].stride = buffer->datas[i].chunk->stride;
        }
        buffer_updated = true;
    }

    // TODO: Move down to read_metadata
    /* Crop metadata: region of the buffer that contains the actual content */
    struct spa_meta_region *region = spa_buffer_find_meta_data(buffer, SPA_META_VideoCrop, sizeof(*region));
    if(region && spa_meta_region_is_valid(region)) {
        // fprintf(stderr, "gsr info: pipewire: crop Region available (%dx%d+%d+%d)\n",
        //       region->region.position.x, region->region.position.y,
        //       region->region.size.width, region->region.size.height);
        self->crop.x = region->region.position.x;
        self->crop.y = region->region.position.y;
        self->crop.width = region->region.size.width;
        self->crop.height = region->region.size.height;
        self->crop.valid = true;
    } else {
        self->crop.valid = false;
    }

    /* Rotation metadata (e.g. rotated displays on wayland) */
    struct spa_meta_videotransform *video_transform = spa_buffer_find_meta_data(buffer, SPA_META_VideoTransform, sizeof(*video_transform));
    enum spa_meta_videotransform_value transform = SPA_META_TRANSFORMATION_None;
    if(video_transform)
        transform = video_transform->transform;

    self->rotation = GSR_MONITOR_ROT_0;
    switch(transform) {
        case SPA_META_TRANSFORMATION_90:
            self->rotation = GSR_MONITOR_ROT_90;
            break;
        case SPA_META_TRANSFORMATION_180:
            self->rotation = GSR_MONITOR_ROT_180;
            break;
        case SPA_META_TRANSFORMATION_270:
            self->rotation = GSR_MONITOR_ROT_270;
            break;
        default:
            // TODO: Support other rotations. Wayland compositors dont use them yet so it's ok to not support it now
            break;
    }

    /* Damage metadata: only mark the frame damaged when a non-empty region is
       reported. Without damage metadata, any buffer update counts as damage. */
    const struct spa_meta *video_damage = spa_buffer_find_meta(buffer, SPA_META_VideoDamage);
    if(video_damage) {
        struct spa_meta_region *meta_region = NULL;
        spa_meta_for_each(meta_region, video_damage) {
            if(meta_region->region.size.width == 0 || meta_region->region.size.height == 0)
                continue;

            self->damaged = true;
            break;
        }
    } else if(buffer_updated) {
        self->damaged = true;
    }

    /* Cursor metadata: position always updates; the bitmap only when present,
       non-empty and in a supported format. */
    const struct spa_meta_cursor *cursor = spa_buffer_find_meta_data(buffer, SPA_META_Cursor, sizeof(*cursor));
    self->cursor.valid = cursor && spa_meta_cursor_is_valid(cursor);
    if (self->cursor.visible && self->cursor.valid) {
        struct spa_meta_bitmap *bitmap = NULL;
        if (cursor->bitmap_offset)
            bitmap = SPA_MEMBER(cursor, cursor->bitmap_offset, struct spa_meta_bitmap);

        // TODO: Maybe check if the cursor is actually visible by checking if there are visible pixels
        if (bitmap && bitmap->size.width > 0 && bitmap->size.height > 0 && is_cursor_format_supported(bitmap->format)) {
            const uint8_t *bitmap_data = SPA_MEMBER(bitmap, bitmap->offset, uint8_t);
            fprintf(stderr, "gsr info: pipewire: cursor bitmap update, size: %dx%d, format: %s\n",
                (int)bitmap->size.width, (int)bitmap->size.height, spa_debug_type_find_name(spa_type_video_format, bitmap->format));

            const size_t bitmap_size = bitmap->size.width * bitmap->size.height * 4;
            uint8_t *new_bitmap_data = realloc(self->cursor.data, bitmap_size);
            if(new_bitmap_data) {
                self->cursor.data = new_bitmap_data;
                /* TODO: Convert bgr and other image formats to rgb here */
                memcpy(self->cursor.data, bitmap_data, bitmap_size);
            }

            self->cursor.hotspot_x = cursor->hotspot.x;
            self->cursor.hotspot_y = cursor->hotspot.y;
            self->cursor.width = bitmap->size.width;
            self->cursor.height = bitmap->size.height;
            self->damaged = true;
        }

        if(cursor->position.x != self->cursor.x || cursor->position.y != self->cursor.y)
            self->damaged = true;

        self->cursor.x = cursor->position.x;
        self->cursor.y = cursor->position.y;
        //fprintf(stderr, "gsr info: pipewire: cursor: %d %d %d %d\n", cursor->hotspot.x, cursor->hotspot.y, cursor->position.x, cursor->position.y);
    }

    pthread_mutex_unlock(&self->mutex);
    pw_stream_queue_buffer(self->stream, pw_buf);
}
/* Stream param-changed event: called when the format is (re)negotiated.
 * Parses the raw video format into self->format, then answers with the
 * metadata (crop, damage, cursor, transform) and buffer-type params we want. */
static void on_param_changed_cb(void *user_data, uint32_t id, const struct spa_pod *param) {
    gsr_pipewire_video *self = user_data;
    if (!param || id != SPA_PARAM_Format)
        return;

    int result = spa_format_parse(param, &self->format.media_type, &self->format.media_subtype);
    if (result < 0)
        return;

    if (self->format.media_type != SPA_MEDIA_TYPE_video || self->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
        return;

    pthread_mutex_lock(&self->mutex);
    spa_format_video_raw_parse(param, &self->format.info.raw);
    pthread_mutex_unlock(&self->mutex);

    /* dmabuf is only requested when a modifier was negotiated or the server
       is new enough (>= 0.3.24) to support it reliably */
    uint32_t buffer_types = 0;
    const bool has_modifier = spa_pod_find_prop(param, NULL, SPA_FORMAT_VIDEO_modifier) != NULL;
    if(has_modifier || check_pw_version(&self->server_version, 0, 3, 24))
        buffer_types |= 1 << SPA_DATA_DmaBuf;

    fprintf(stderr, "gsr info: pipewire: negotiated format:\n");

    fprintf(stderr, "gsr info: pipewire:     Format: %d (%s)\n",
        self->format.info.raw.format,
        spa_debug_type_find_name(spa_type_video_format, self->format.info.raw.format));

    self->has_modifier = has_modifier;
    if(self->has_modifier) {
        fprintf(stderr, "gsr info: pipewire:     Modifier: 0x%" PRIx64 "\n", self->format.info.raw.modifier);
    }

    fprintf(stderr, "gsr info: pipewire:     Size: %dx%d\n", self->format.info.raw.size.width, self->format.info.raw.size.height);
    fprintf(stderr, "gsr info: pipewire:     Framerate: %d/%d\n", self->format.info.raw.framerate.num, self->format.info.raw.framerate.denom);

    uint8_t params_buffer[2048];
    struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
    const struct spa_pod *params[5];
    int param_index = 0;

    /* Crop region metadata */
    params[param_index++] = spa_pod_builder_add_object(
        &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
        SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
        SPA_PARAM_META_size,
        SPA_POD_Int(sizeof(struct spa_meta_region)));

    /* Damage regions metadata (1..16 regions) */
    params[param_index++] = spa_pod_builder_add_object(
        &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
        SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
        SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
            sizeof(struct spa_meta_region) * 16,
            sizeof(struct spa_meta_region) * 1,
            sizeof(struct spa_meta_region) * 16));

    /* Cursor metadata with a bitmap between 1x1 and 1024x1024 */
    params[param_index++] = spa_pod_builder_add_object(
        &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
        SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
        SPA_PARAM_META_size,
        SPA_POD_CHOICE_RANGE_Int(CURSOR_META_SIZE(64, 64),
            CURSOR_META_SIZE(1, 1),
            CURSOR_META_SIZE(1024, 1024)));

    /* Accepted buffer data types (dmabuf when available) */
    params[param_index++] = spa_pod_builder_add_object(
        &pod_builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
        SPA_PARAM_BUFFERS_dataType, SPA_POD_Int(buffer_types));

#if PW_CHECK_VERSION(0, 3, 62)
    if (check_pw_version(&self->server_version, 0, 3, 62)) {
        /* Video transformation */
        params[param_index++] = spa_pod_builder_add_object(&pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
            SPA_PARAM_META_type,
            SPA_POD_Id(SPA_META_VideoTransform),
            SPA_PARAM_META_size,
            SPA_POD_Int(sizeof(struct spa_meta_videotransform)));
    }
#endif

    pw_stream_update_params(self->stream, params, param_index);
    self->negotiated = true;
}
/* Stream state-changed event: log the transition and track the paused state
 * (recording when the pause started) under the mutex. */
static void on_state_changed_cb(void *user_data, enum pw_stream_state prev_state, enum pw_stream_state new_state, const char *error) {
    gsr_pipewire_video *self = user_data;
    fprintf(stderr, "gsr info: pipewire: stream %p previous state: \"%s\", new state: \"%s\" (error: %s)\n",
        (void*)self->stream, pw_stream_state_as_string(prev_state), pw_stream_state_as_string(new_state),
        error ? error : "none");

    pthread_mutex_lock(&self->mutex);
    const bool now_paused = new_state == PW_STREAM_STATE_PAUSED;
    if(now_paused)
        self->paused_start_secs = clock_get_monotonic_seconds();
    self->paused = now_paused;
    pthread_mutex_unlock(&self->mutex);
}
/* pw_stream event vtable wired up in gsr_pipewire_video_setup_stream. */
static const struct pw_stream_events stream_events = {
    PW_VERSION_STREAM_EVENTS,
    .state_changed = on_state_changed_cb,
    .param_changed = on_param_changed_cb,
    .process = on_process_cb,
};
/* Builds one EnumFormat pod for |format| with the given dmabuf modifier
 * choices, a size range of 1x1..16384x16384 (preferring 32x32) and a
 * framerate range of 0..500 (preferring ovi's fps). The push/pop frame
 * nesting must match exactly for the pod to be valid. */
static inline struct spa_pod *build_format(struct spa_pod_builder *b,
    const gsr_pipewire_video_video_info *ovi,
    uint32_t format, const uint64_t *modifiers,
    size_t modifier_count)
{
    struct spa_pod_frame format_frame;

    spa_pod_builder_push_object(b, &format_frame, SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
    spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
    spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);

    spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);

    if (modifier_count > 0) {
        struct spa_pod_frame modifier_frame;

        spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
        spa_pod_builder_push_choice(b, &modifier_frame, SPA_CHOICE_Enum, 0);

        /* The first element of choice pods is the preferred value. Here
         * we arbitrarily pick the first modifier as the preferred one.
         */
        // TODO:
        spa_pod_builder_long(b, modifiers[0]);

        for(uint32_t i = 0; i < modifier_count; i++)
            spa_pod_builder_long(b, modifiers[i]);

        spa_pod_builder_pop(b, &modifier_frame);
    }

    spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
        SPA_POD_CHOICE_RANGE_Rectangle(
            &SPA_RECTANGLE(32, 32),
            &SPA_RECTANGLE(1, 1),
            &SPA_RECTANGLE(16384, 16384)),
        SPA_FORMAT_VIDEO_framerate,
        SPA_POD_CHOICE_RANGE_Fraction(
            &SPA_FRACTION(ovi->fps_num, ovi->fps_den),
            &SPA_FRACTION(0, 1), &SPA_FRACTION(500, 1)),
        0);

    return spa_pod_builder_pop(b, &format_frame);
}
/* https://gstreamer.freedesktop.org/documentation/additional/design/mediatype-video-raw.html?gi-language=c#formats */
/* For some reason gstreamer formats are in opposite order to drm formats */
static int64_t spa_video_format_to_drm_format(const enum spa_video_format format) {
switch(format) {
case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
case SPA_VIDEO_FORMAT_RGBA: return DRM_FORMAT_ABGR8888;
case SPA_VIDEO_FORMAT_BGRA: return DRM_FORMAT_ARGB8888;
case SPA_VIDEO_FORMAT_RGB: return DRM_FORMAT_XBGR8888;
case SPA_VIDEO_FORMAT_BGR: return DRM_FORMAT_XRGB8888;
//case SPA_VIDEO_FORMAT_ARGB: return DRM_FORMAT_BGRA8888;
//case SPA_VIDEO_FORMAT_ABGR: return DRM_FORMAT_RGBA8888;
#if PW_CHECK_VERSION(0, 3, 41)
case SPA_VIDEO_FORMAT_xRGB_210LE: return DRM_FORMAT_XRGB2101010;
case SPA_VIDEO_FORMAT_xBGR_210LE: return DRM_FORMAT_XBGR2101010;
case SPA_VIDEO_FORMAT_ARGB_210LE: return DRM_FORMAT_ARGB2101010;
case SPA_VIDEO_FORMAT_ABGR_210LE: return DRM_FORMAT_ABGR2101010;
#endif
default: break;
}
return DRM_FORMAT_INVALID;
}
/* Number of entries of |video_formats| actually usable with the compiled
 * pipewire headers: the 10-bit formats only exist from 0.3.41 on. */
#if PW_CHECK_VERSION(0, 3, 41)
#define GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS GSR_PIPEWIRE_VIDEO_MAX_VIDEO_FORMATS
#else
#define GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS 6
#endif

/* Video formats offered during negotiation, in preference order. */
static const enum spa_video_format video_formats[GSR_PIPEWIRE_VIDEO_MAX_VIDEO_FORMATS] = {
    SPA_VIDEO_FORMAT_BGRx,
    SPA_VIDEO_FORMAT_BGR,
    SPA_VIDEO_FORMAT_RGBx,
    SPA_VIDEO_FORMAT_RGB,
    SPA_VIDEO_FORMAT_RGBA,
    SPA_VIDEO_FORMAT_BGRA,
    //SPA_VIDEO_FORMAT_ARGB,
    //SPA_VIDEO_FORMAT_ABGR,
#if PW_CHECK_VERSION(0, 3, 41)
    SPA_VIDEO_FORMAT_xRGB_210LE,
    SPA_VIDEO_FORMAT_xBGR_210LE,
    SPA_VIDEO_FORMAT_ARGB_210LE,
    SPA_VIDEO_FORMAT_ABGR_210LE,
#endif
};
/* Fills |params| with one EnumFormat pod per supported format that still has
 * at least one modifier. Returns false when the server is too old (< 0.3.33)
 * for format (re)negotiation. */
static bool gsr_pipewire_video_build_format_params(gsr_pipewire_video *self, struct spa_pod_builder *pod_builder, struct spa_pod **params, uint32_t *num_params) {
    *num_params = 0;

    if(!check_pw_version(&self->server_version, 0, 3, 33))
        return false;

    uint32_t count = 0;
    for(size_t i = 0; i < GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS; i++) {
        const gsr_video_format *video_format = &self->supported_video_formats[i];
        if(video_format->modifiers_size == 0)
            continue;
        params[count] = build_format(pod_builder, &self->video_info, video_format->format, self->modifiers + video_format->modifiers_index, video_format->modifiers_size);
        ++count;
    }

    *num_params = count;
    return true;
}
/* Loop event handler (triggered via pw_loop_signal_event from the fallback
 * path) that rebuilds the format params — e.g. after a failing modifier was
 * removed — and offers them to the stream again. */
static void renegotiate_format(void *data, uint64_t expirations) {
    (void)expirations;
    gsr_pipewire_video *self = (gsr_pipewire_video*)data;

    pw_thread_loop_lock(self->thread_loop);

    struct spa_pod *params[GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS];
    uint32_t num_video_formats = 0;
    uint8_t params_buffer[4096];
    struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
    if (!gsr_pipewire_video_build_format_params(self, &pod_builder, params, &num_video_formats)) {
        fprintf(stderr, "gsr error: renegotiate_format: failed to build formats\n");
        pw_thread_loop_unlock(self->thread_loop);
        return;
    }

    pw_stream_update_params(self->stream, (const struct spa_pod**)params, num_video_formats);
    pw_thread_loop_unlock(self->thread_loop);
}
/* Queries the EGL driver for the dmabuf modifiers supported for |format|,
 * writing up to |max_modifiers| into |modifiers|. On failure paths where
 * negotiation should still proceed, a single DRM_FORMAT_MOD_INVALID entry is
 * emitted so the format is offered without an explicit modifier. Returns true
 * only when the driver query succeeded. */
static bool spa_video_format_get_modifiers(gsr_pipewire_video *self, const enum spa_video_format format, uint64_t *modifiers, int32_t max_modifiers, int32_t *num_modifiers) {
    *num_modifiers = 0;

    if(max_modifiers == 0) {
        fprintf(stderr, "gsr error: spa_video_format_get_modifiers: no space for modifiers left\n");
        //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
        //modifiers[1] = DRM_FORMAT_MOD_INVALID;
        //*num_modifiers = 2;
        return false;
    }

    if(!self->egl->eglQueryDmaBufModifiersEXT) {
        fprintf(stderr, "gsr error: spa_video_format_get_modifiers: failed to initialize modifiers because eglQueryDmaBufModifiersEXT is not available\n");
        //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
        //modifiers[1] = DRM_FORMAT_MOD_INVALID;
        //*num_modifiers = 2;
        modifiers[0] = DRM_FORMAT_MOD_INVALID;
        *num_modifiers = 1;
        return false;
    }

    const int64_t drm_format = spa_video_format_to_drm_format(format);
    if(drm_format == DRM_FORMAT_INVALID) {
        fprintf(stderr, "gsr error: spa_video_format_get_modifiers: unsupported format: %d\n", (int)format);
        return false;
    }

    if(!self->egl->eglQueryDmaBufModifiersEXT(self->egl->egl_display, drm_format, max_modifiers, modifiers, NULL, num_modifiers)) {
        fprintf(stderr, "gsr error: spa_video_format_get_modifiers: eglQueryDmaBufModifiersEXT failed with drm format %d, %" PRIi64 "\n", (int)format, drm_format);
        //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
        //modifiers[1] = DRM_FORMAT_MOD_INVALID;
        //*num_modifiers = 2;
        modifiers[0] = DRM_FORMAT_MOD_INVALID;
        *num_modifiers = 1;
        return false;
    }

    // if(*num_modifiers + 2 <= max_modifiers) {
    //     modifiers[*num_modifiers + 0] = DRM_FORMAT_MOD_LINEAR;
    //     modifiers[*num_modifiers + 1] = DRM_FORMAT_MOD_INVALID;
    //     *num_modifiers += 2;
    // }
    return true;
}
/* Populates supported_video_formats: each format gets a contiguous slice
 * [modifiers_index, modifiers_index + modifiers_size) of the shared
 * self->modifiers array, appended in iteration order. */
static void gsr_pipewire_video_init_modifiers(gsr_pipewire_video *self) {
    for(size_t i = 0; i < GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS; i++) {
        self->supported_video_formats[i].format = video_formats[i];

        int32_t num_modifiers = 0;
        spa_video_format_get_modifiers(self, self->supported_video_formats[i].format, self->modifiers + self->num_modifiers, GSR_PIPEWIRE_VIDEO_MAX_MODIFIERS - self->num_modifiers, &num_modifiers);
        self->supported_video_formats[i].modifiers_index = self->num_modifiers;
        self->supported_video_formats[i].modifiers_size = num_modifiers;

        self->num_modifiers += num_modifiers;
    }
}
/* Removes |modifier| from |video_format|'s slice of the shared modifier
 * array, if present, shifting the remaining modifiers of that format down by
 * one and shrinking modifiers_size. At most one occurrence is removed.
 * Fix: the shift must index the shared array through modifiers_index — the
 * previous code used the format-relative index directly, moving entries of
 * the wrong region and corrupting other formats' modifier lists. */
static void gsr_pipewire_video_format_remove_modifier(gsr_pipewire_video *self, gsr_video_format *video_format, uint64_t modifier) {
    for(size_t i = 0; i < video_format->modifiers_size; ++i) {
        if(self->modifiers[video_format->modifiers_index + i] != modifier)
            continue;

        for(size_t j = i + 1; j < video_format->modifiers_size; ++j) {
            self->modifiers[video_format->modifiers_index + j - 1] = self->modifiers[video_format->modifiers_index + j];
        }
        --video_format->modifiers_size;
        return;
    }
}
/* Removes |modifier| from every supported format's modifier list, so the next
 * renegotiation no longer offers it. */
static void gsr_pipewire_video_remove_modifier(gsr_pipewire_video *self, uint64_t modifier) {
    for(size_t i = 0; i < GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS; i++)
        gsr_pipewire_video_format_remove_modifier(self, &self->supported_video_formats[i], modifier);
}
/* Creates the pipewire thread loop, context and core (connected over
 * self->fd), waits for the server version round-trip, queries modifiers, and
 * connects a capture stream to self->node with the supported format params.
 * On failure, tears down everything created so far and returns false. */
static bool gsr_pipewire_video_setup_stream(gsr_pipewire_video *self) {
    struct spa_pod *params[GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS];
    uint32_t num_video_formats = 0;
    uint8_t params_buffer[4096];
    struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));

    self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
    if(!self->thread_loop) {
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to create pipewire thread\n");
        goto error;
    }

    self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
    if(!self->context) {
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to create pipewire context\n");
        goto error;
    }

    if(pw_thread_loop_start(self->thread_loop) < 0) {
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to start thread\n");
        goto error;
    }

    /* Everything below touches loop-owned objects, so hold the loop lock */
    pw_thread_loop_lock(self->thread_loop);

    // TODO: Why pass 5 to fcntl?
    /* Connect over a duplicate of the portal-provided fd so the original stays ours to close */
    self->core = pw_context_connect_fd(self->context, fcntl(self->fd, F_DUPFD_CLOEXEC, 5), NULL, 0);
    if(!self->core) {
        pw_thread_loop_unlock(self->thread_loop);
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to connect to fd %d\n", self->fd);
        goto error;
    }

    // TODO: Error check
    pw_core_add_listener(self->core, &self->core_listener, &core_events, self);

    /* Block until on_core_done_cb (or an error) signals — ensures
       self->server_version is known before building format params */
    self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, 0);
    pw_thread_loop_wait(self->thread_loop);

    gsr_pipewire_video_init_modifiers(self);

    // TODO: Cleanup?
    self->reneg = pw_loop_add_event(pw_thread_loop_get_loop(self->thread_loop), renegotiate_format, self);
    if(!self->reneg) {
        pw_thread_loop_unlock(self->thread_loop);
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: pw_loop_add_event failed\n");
        goto error;
    }

    self->stream = pw_stream_new(self->core, "com.dec05eba.gpu_screen_recorder",
        pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
            PW_KEY_MEDIA_CATEGORY, "Capture",
            PW_KEY_MEDIA_ROLE, "Screen", NULL));
    if(!self->stream) {
        pw_thread_loop_unlock(self->thread_loop);
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to create stream\n");
        goto error;
    }
    pw_stream_add_listener(self->stream, &self->stream_listener, &stream_events, self);

    if(!gsr_pipewire_video_build_format_params(self, &pod_builder, params, &num_video_formats)) {
        pw_thread_loop_unlock(self->thread_loop);
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to build format params\n");
        goto error;
    }

    if(pw_stream_connect(
        self->stream, PW_DIRECTION_INPUT, self->node,
        PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS, (const struct spa_pod**)params,
        num_video_formats) < 0)
    {
        pw_thread_loop_unlock(self->thread_loop);
        fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to connect stream\n");
        goto error;
    }

    pw_thread_loop_unlock(self->thread_loop);
    return true;

    error:
    /* Teardown mirrors gsr_pipewire_video_deinit: stop loop, then stream,
       core, context, and the loop object itself */
    if(self->thread_loop) {
        //pw_thread_loop_wait(self->thread_loop);
        pw_thread_loop_stop(self->thread_loop);
    }

    if(self->stream) {
        pw_stream_disconnect(self->stream);
        pw_stream_destroy(self->stream);
        self->stream = NULL;
    }

    if(self->core) {
        pw_core_disconnect(self->core);
        self->core = NULL;
    }

    if(self->context) {
        pw_context_destroy(self->context);
        self->context = NULL;
    }

    if(self->thread_loop) {
        pw_thread_loop_destroy(self->thread_loop);
        self->thread_loop = NULL;
    }
    return false;
}
static int pw_init_counter = 0;
/* Initializes pipewire video capture over the portal-provided |pipewire_fd|
 * and |pipewire_node|. Takes ownership of the fd (closed in deinit).
 * pw_init/pw_deinit are refcounted via pw_init_counter so multiple instances
 * can coexist. Returns false (after cleaning up) on any failure. */
bool gsr_pipewire_video_init(gsr_pipewire_video *self, int pipewire_fd, uint32_t pipewire_node, int fps, bool capture_cursor, gsr_egl *egl) {
    if(pw_init_counter == 0)
        pw_init(NULL, NULL);
    ++pw_init_counter;

    memset(self, 0, sizeof(*self));
    self->egl = egl;
    self->fd = pipewire_fd;
    self->node = pipewire_node;
    if(pthread_mutex_init(&self->mutex, NULL) != 0) {
        fprintf(stderr, "gsr error: gsr_pipewire_video_init: failed to initialize mutex\n");
        gsr_pipewire_video_deinit(self);
        return false;
    }
    self->mutex_initialized = true;
    self->video_info.fps_num = fps;
    self->video_info.fps_den = 1;
    self->cursor.visible = capture_cursor;

    if(!gsr_pipewire_video_setup_stream(self)) {
        gsr_pipewire_video_deinit(self);
        return false;
    }

    return true;
}
/* Releases all resources owned by the capture instance: pipewire objects
 * (loop stopped first), the portal fd, dup'd dmabuf plane fds, the mutex and
 * the cursor bitmap. Decrements the pw_init refcount and calls pw_deinit when
 * it reaches zero. Safe to call after a partially failed init. */
void gsr_pipewire_video_deinit(gsr_pipewire_video *self) {
    if(self->thread_loop) {
        //pw_thread_loop_wait(self->thread_loop);
        pw_thread_loop_stop(self->thread_loop);
    }

    if(self->stream) {
        pw_stream_disconnect(self->stream);
        pw_stream_destroy(self->stream);
        self->stream = NULL;
    }

    if(self->core) {
        pw_core_disconnect(self->core);
        self->core = NULL;
    }

    if(self->context) {
        pw_context_destroy(self->context);
        self->context = NULL;
    }

    if(self->thread_loop) {
        pw_thread_loop_destroy(self->thread_loop);
        self->thread_loop = NULL;
    }

    /* NOTE(review): the `> 0` checks below would skip a valid fd 0 —
       presumably never the case here; confirm. */
    if(self->fd > 0) {
        close(self->fd);
        self->fd = -1;
    }

    for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
        if(self->dmabuf_data[i].fd > 0) {
            close(self->dmabuf_data[i].fd);
            self->dmabuf_data[i].fd = -1;
        }
    }
    self->dmabuf_num_planes = 0;

    self->negotiated = false;

    if(self->mutex_initialized) {
        pthread_mutex_destroy(&self->mutex);
        self->mutex_initialized = false;
    }

    if(self->cursor.data) {
        free(self->cursor.data);
        self->cursor.data = NULL;
    }

    --pw_init_counter;
    if(pw_init_counter == 0) {
#if PW_CHECK_VERSION(0, 3, 49)
        pw_deinit();
#endif
    }
}
/* Creates an EGLImage from the current frame's dmabuf planes. The EGL error
 * queue is drained before the call so a stale error cannot be mistaken for a
 * failure of this import. Returns NULL on failure (the image, if partially
 * created, is destroyed). */
static EGLImage gsr_pipewire_video_create_egl_image(gsr_pipewire_video *self, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
    intptr_t img_attr[44];
    setup_dma_buf_attrs(img_attr, spa_video_format_to_drm_format(self->format.info.raw.format), self->format.info.raw.size.width, self->format.info.raw.size.height,
        fds, offsets, pitches, modifiers, self->dmabuf_num_planes, use_modifiers);
    while(self->egl->eglGetError() != EGL_SUCCESS){}
    EGLImage image = self->egl->eglCreateImage(self->egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
    if(!image || self->egl->eglGetError() != EGL_SUCCESS) {
        if(image)
            self->egl->eglDestroyImage(self->egl->egl_display, image);
        return NULL;
    }
    return image;
}
/* Creates an EGLImage for the current frame, with two fallback paths:
 * - if creation with explicit modifiers fails and the negotiated modifier is
 *   DRM_FORMAT_MOD_INVALID, retry (permanently, via no_modifiers_fallback)
 *   without modifiers;
 * - otherwise blacklist the failing modifier and ask pipewire to renegotiate,
 *   returning NULL for this frame.
 * Caller must hold self->mutex (see gsr_pipewire_video_map_texture). */
static EGLImage gsr_pipewire_video_create_egl_image_with_fallback(gsr_pipewire_video *self) {
    int fds[GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES];
    uint32_t offsets[GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES];
    uint32_t pitches[GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES];
    uint64_t modifiers[GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES];
    for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
        fds[i] = self->dmabuf_data[i].fd;
        offsets[i] = self->dmabuf_data[i].offset;
        pitches[i] = self->dmabuf_data[i].stride;
        /* All planes share the single negotiated modifier */
        modifiers[i] = self->format.info.raw.modifier;
    }

    EGLImage image = NULL;
    if(self->no_modifiers_fallback || !self->has_modifier) {
        image = gsr_pipewire_video_create_egl_image(self, fds, offsets, pitches, modifiers, false);
    } else {
        image = gsr_pipewire_video_create_egl_image(self, fds, offsets, pitches, modifiers, true);
        if(!image) {
            if(self->format.info.raw.modifier == DRM_FORMAT_MOD_INVALID) {
                fprintf(stderr, "gsr error: gsr_pipewire_video_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
                self->no_modifiers_fallback = true;
                image = gsr_pipewire_video_create_egl_image(self, fds, offsets, pitches, modifiers, false);
            } else {
                fprintf(stderr, "gsr error: gsr_pipewire_video_create_egl_image_with_fallback: failed to create egl image with modifiers, renegotiating with a different modifier\n");
                self->negotiated = false;
                /* Signal the pipewire loop (under its own lock) to renegotiate
                   without the modifier that just failed */
                pw_thread_loop_lock(self->thread_loop);
                gsr_pipewire_video_remove_modifier(self, self->format.info.raw.modifier);
                pw_loop_signal_event(pw_thread_loop_get_loop(self->thread_loop), self->reneg);
                pw_thread_loop_unlock(self->thread_loop);
            }
        }
    }
    return image;
}
/* Attaches |image| to |texture_id| via glEGLImageTargetTexture2DOES, using
 * either the regular 2D or the external texture target.
 * Returns true if the bind raised no GL error. */
static bool gsr_pipewire_video_bind_image_to_texture(gsr_pipewire_video *self, EGLImage image, unsigned int texture_id, bool external_texture) {
    const int target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;

    /* Clear the GL error queue so the check below reflects only this bind */
    while(self->egl->glGetError() != 0){}

    self->egl->glBindTexture(target, texture_id);
    self->egl->glEGLImageTargetTexture2DOES(target, image);
    const bool ok = self->egl->glGetError() == 0;
    self->egl->glBindTexture(target, 0);
    return ok;
}
/* Binds |image| to the appropriate texture from |texture_map|. Tries the
 * regular GL_TEXTURE_2D path first; if that fails, permanently switches to
 * the external (GL_TEXTURE_EXTERNAL_OES) texture path for this capture. */
static void gsr_pipewire_video_bind_image_to_texture_with_fallback(gsr_pipewire_video *self, gsr_texture_map texture_map, EGLImage image) {
    if(self->external_texture_fallback) {
        gsr_pipewire_video_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
    } else {
        if(!gsr_pipewire_video_bind_image_to_texture(self, image, texture_map.texture_id, false)) {
            /* Fixed: message previously named the wrong function (gsr_pipewire_video_map_texture) */
            fprintf(stderr, "gsr error: gsr_pipewire_video_bind_image_to_texture_with_fallback: failed to bind image to texture, trying with external texture\n");
            self->external_texture_fallback = true;
            gsr_pipewire_video_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
        }
    }
}
/* Uploads the pending CPU-side cursor bitmap (if any) to the cursor texture
 * and frees the bitmap — it is only kept until the next upload. */
static void gsr_pipewire_video_update_cursor_texture(gsr_pipewire_video *self, gsr_texture_map texture_map) {
    if(!self->cursor.data)
        return;

    self->egl->glBindTexture(GL_TEXTURE_2D, texture_map.cursor_texture_id);
    // TODO: glTextureSubImage2D if same size
    self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->cursor.width, self->cursor.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, self->cursor.data);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);

    /* The upload consumed the bitmap; drop the CPU copy */
    free(self->cursor.data);
    self->cursor.data = NULL;
}
/* Binds the latest negotiated dmabuf frame to the textures in |texture_map|
 * and fills |output| with frame geometry, crop/cursor regions and the dmabuf
 * plane descriptors. Ownership of the plane fds is TRANSFERRED to the caller
 * through output->dmabuf_data (our copies are set to -1 below); the caller is
 * responsible for closing them.
 * Returns false when no frame has been negotiated yet or the EGLImage could
 * not be created; |output| is zeroed in that case. */
bool gsr_pipewire_video_map_texture(gsr_pipewire_video *self, gsr_texture_map texture_map, gsr_map_texture_output *output) {
    for(int i = 0; i < GSR_PIPEWIRE_VIDEO_DMABUF_MAX_PLANES; ++i) {
        memset(&output->dmabuf_data[i], 0, sizeof(gsr_pipewire_video_dmabuf_data));
    }
    output->num_dmabuf_data = 0;
    output->using_external_image = self->external_texture_fallback;
    output->fourcc = 0;
    output->modifiers = 0;
    output->rotation = GSR_MONITOR_ROT_0;

    pthread_mutex_lock(&self->mutex);

    /* No frame available yet (or the fds were already handed out) */
    if(!self->negotiated || self->dmabuf_data[0].fd <= 0) {
        pthread_mutex_unlock(&self->mutex);
        return false;
    }

    EGLImage image = gsr_pipewire_video_create_egl_image_with_fallback(self);
    if(!image) {
        pthread_mutex_unlock(&self->mutex);
        return false;
    }

    gsr_pipewire_video_bind_image_to_texture_with_fallback(self, texture_map, image);
    /* The bind may have flipped us onto the external-texture path */
    output->using_external_image = self->external_texture_fallback;
    self->egl->eglDestroyImage(self->egl->egl_display, image);

    gsr_pipewire_video_update_cursor_texture(self, texture_map);

    output->texture_width = self->format.info.raw.size.width;
    output->texture_height = self->format.info.raw.size.height;

    /* Default region is the full frame; shrink it when a crop was provided */
    output->region.x = 0;
    output->region.y = 0;
    output->region.width = self->format.info.raw.size.width;
    output->region.height = self->format.info.raw.size.height;

    if(self->crop.valid) {
        output->region.x = self->crop.x;
        output->region.y = self->crop.y;
        output->region.width = self->crop.width;
        output->region.height = self->crop.height;
    }

    // TODO: Test transform + cropping
    /* 90/270 degree rotations swap the region's width and height */
    if(self->rotation == GSR_MONITOR_ROT_90 || self->rotation == GSR_MONITOR_ROT_270) {
        const int temp = output->region.width;
        output->region.width = output->region.height;
        output->region.height = temp;
    }

    /* TODO: Test if cursor hotspot is correct */
    output->cursor_region.x = self->cursor.x - self->cursor.hotspot_x;
    output->cursor_region.y = self->cursor.y - self->cursor.hotspot_y;
    output->cursor_region.width = self->cursor.width;
    output->cursor_region.height = self->cursor.height;

    /* Move the plane fds to the caller; -1 marks them as no longer ours */
    for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
        output->dmabuf_data[i] = self->dmabuf_data[i];
        self->dmabuf_data[i].fd = -1;
    }
    output->num_dmabuf_data = self->dmabuf_num_planes;
    output->fourcc = spa_video_format_to_drm_format(self->format.info.raw.format);
    output->modifiers = self->format.info.raw.modifier;
    output->rotation = self->rotation;
    self->dmabuf_num_planes = 0;

    pthread_mutex_unlock(&self->mutex);
    return true;
}
/* Thread-safe read of the damage flag. Returns false before init completes. */
bool gsr_pipewire_video_is_damaged(gsr_pipewire_video *self) {
    if(!self->mutex_initialized)
        return false;

    pthread_mutex_lock(&self->mutex);
    const bool result = self->damaged;
    pthread_mutex_unlock(&self->mutex);
    return result;
}
/* Thread-safe reset of the damage flag. No-op before init completes. */
void gsr_pipewire_video_clear_damage(gsr_pipewire_video *self) {
    if(!self->mutex_initialized)
        return;

    pthread_mutex_lock(&self->mutex);
    self->damaged = false;
    pthread_mutex_unlock(&self->mutex);
}
/* Returns true when the stream has been paused for at least 3 seconds,
 * which callers treat as a signal to restart the capture. */
bool gsr_pipewire_video_should_restart(gsr_pipewire_video *self) {
    if(!self->mutex_initialized)
        return false;

    bool restart = false;
    pthread_mutex_lock(&self->mutex);
    if(self->paused) {
        const double paused_for_secs = clock_get_monotonic_seconds() - self->paused_start_secs;
        restart = paused_for_secs >= 3.0;
    }
    pthread_mutex_unlock(&self->mutex);
    return restart;
}

138
src/plugins.c Normal file
View File

@ -0,0 +1,138 @@
#include "../include/plugins.h"
#include "../include/utils.h"
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>
/* Maps a plugin color depth to the GL internal format used for the plugin
 * render target texture. */
static int color_depth_to_gl_internal_format(gsr_plugin_color_depth color_depth) {
    if(color_depth == GSR_PLUGIN_COLOR_DEPTH_10_BITS)
        return GL_RGBA16;

    /* Only the two depths above exist; anything else is a programming error */
    assert(color_depth == GSR_PLUGIN_COLOR_DEPTH_8_BITS);
    return GL_RGBA8;
}
/* Initializes the plugin system: creates the render target texture and the
 * color conversion pipeline plugins draw into. Returns false on failure;
 * partially-created state is released before returning. */
bool gsr_plugins_init(gsr_plugins *self, gsr_plugin_init_params init_params, gsr_egl *egl) {
    memset(self, 0, sizeof(*self));
    self->init_params = init_params;
    self->egl = egl;

    /* TODO: GL_RGB8? */
    const unsigned int texture = gl_create_texture(egl, init_params.width, init_params.height, color_depth_to_gl_internal_format(init_params.color_depth), GL_RGBA, GL_LINEAR);
    if(texture == 0) {
        fprintf(stderr, "gsr error: gsr_plugins_init failed to create texture\n");
        return false;
    }
    self->texture = texture;

    const gsr_color_conversion_params color_conversion_params = {
        .egl = egl,
        .destination_color = GSR_DESTINATION_COLOR_RGB8, /* TODO: Support 10-bits, use init_params.color_depth */
        .destination_textures[0] = self->texture,
        .destination_textures_size[0] = (vec2i){ init_params.width, init_params.height },
        .num_destination_textures = 1,
        .color_range = GSR_COLOR_RANGE_FULL,
        .load_external_image_shader = false,
        //.force_graphics_shader = false,
    };
    /* Fixed: removed a redundant re-assignment of destination_textures[0]
       (already set in the designated initializer above) */

    if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
        fprintf(stderr, "gsr error: gsr_plugins_init failed to create color conversion\n");
        gsr_plugins_deinit(self);
        return false;
    }

    gsr_color_conversion_clear(&self->color_conversion);
    return true;
}
/* Unloads all plugins (last loaded first) and releases the render texture
 * and color conversion state. Safe after a partially-failed init. */
void gsr_plugins_deinit(gsr_plugins *self) {
    for(int i = self->num_plugins - 1; i >= 0; --i) {
        gsr_plugin *p = &self->plugins[i];
        p->gsr_plugin_deinit(p->data.userdata);
        fprintf(stderr, "gsr info: unloaded plugin: %s\n", p->data.name);
    }
    self->num_plugins = 0;

    if(self->texture > 0) {
        self->egl->glDeleteTextures(1, &self->texture);
        self->texture = 0;
    }

    gsr_color_conversion_deinit(&self->color_conversion);
}
/* Loads a plugin shared library, resolves its entry points and calls its
 * init function. The plugin must set both a name and a non-zero version.
 * On any failure the library is dlclose'd again and false is returned. */
bool gsr_plugins_load_plugin(gsr_plugins *self, const char *plugin_filepath) {
    if(self->num_plugins >= GSR_MAX_PLUGINS) {
        /* Fixed garbled message ("failed, more plugins can't load more than") */
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed, can't load more than %d plugins. Report this as an issue\n", GSR_MAX_PLUGINS);
        return false;
    }

    gsr_plugin plugin;
    memset(&plugin, 0, sizeof(plugin));

    plugin.lib = dlopen(plugin_filepath, RTLD_LAZY);
    if(!plugin.lib) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to load \"%s\", error: %s\n", plugin_filepath, dlerror());
        return false;
    }

    plugin.gsr_plugin_init = dlsym(plugin.lib, "gsr_plugin_init");
    if(!plugin.gsr_plugin_init) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to find \"gsr_plugin_init\" in plugin \"%s\"\n", plugin_filepath);
        goto fail;
    }

    plugin.gsr_plugin_deinit = dlsym(plugin.lib, "gsr_plugin_deinit");
    if(!plugin.gsr_plugin_deinit) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to find \"gsr_plugin_deinit\" in plugin \"%s\"\n", plugin_filepath);
        goto fail;
    }

    if(!plugin.gsr_plugin_init(&self->init_params, &plugin.data)) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to load plugin \"%s\", gsr_plugin_init in the plugin failed\n", plugin_filepath);
        goto fail;
    }

    /* A loaded plugin must identify itself */
    if(!plugin.data.name) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to load plugin \"%s\", the plugin didn't set the name (gsr_plugin_init_return.name)\n", plugin_filepath);
        goto fail;
    }

    if(plugin.data.version == 0) {
        fprintf(stderr, "gsr error: gsr_plugins_load_plugin failed to load plugin \"%s\", the plugin didn't set the version (gsr_plugin_init_return.version)\n", plugin_filepath);
        goto fail;
    }

    fprintf(stderr, "gsr info: loaded plugin: %s, name: %s, version: %u\n", plugin_filepath, plugin.data.name, plugin.data.version);
    self->plugins[self->num_plugins] = plugin;
    ++self->num_plugins;
    return true;

    fail:
    dlclose(plugin.lib);
    return false;
}
/* Renders every loaded plugin that provides a draw callback into the
 * color-conversion framebuffer. */
void gsr_plugins_draw(gsr_plugins *self) {
    const gsr_plugin_draw_params draw_params = {
        .width = self->init_params.width,
        .height = self->init_params.height,
    };

    self->egl->glBindFramebuffer(GL_FRAMEBUFFER, self->color_conversion.framebuffers[0]);
    self->egl->glViewport(0, 0, self->init_params.width, self->init_params.height);

    for(int i = 0; i < self->num_plugins; ++i) {
        const gsr_plugin *p = &self->plugins[i];
        if(p->data.draw)
            p->data.draw(&draw_params, p->data.userdata);
    }

    self->egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}

View File

@ -0,0 +1,91 @@
#include "../../include/replay_buffer/replay_buffer.h"
#include "../../include/replay_buffer/replay_buffer_ram.h"
#include "../../include/replay_buffer/replay_buffer_disk.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* Creates a replay buffer backed by RAM or disk storage.
 * |replay_directory| and |replay_buffer_time| are used by the disk backend,
 * |replay_buffer_num_packets| by the RAM backend.
 * Returns NULL on allocation or mutex-init failure. */
gsr_replay_buffer* gsr_replay_buffer_create(gsr_replay_storage replay_storage, const char *replay_directory, double replay_buffer_time, size_t replay_buffer_num_packets) {
    gsr_replay_buffer *replay_buffer = NULL;
    switch(replay_storage) {
        case GSR_REPLAY_STORAGE_RAM:
            replay_buffer = gsr_replay_buffer_ram_create(replay_buffer_num_packets);
            break;
        case GSR_REPLAY_STORAGE_DISK:
            replay_buffer = gsr_replay_buffer_disk_create(replay_directory, replay_buffer_time);
            break;
    }

    /* Fixed: previously dereferenced replay_buffer without checking whether
       the backend create function failed (returned NULL) */
    if(!replay_buffer)
        return NULL;

    replay_buffer->mutex_initialized = false;
    replay_buffer->original_replay_buffer = NULL;
    if(pthread_mutex_init(&replay_buffer->mutex, NULL) != 0) {
        gsr_replay_buffer_destroy(replay_buffer);
        return NULL;
    }
    replay_buffer->mutex_initialized = true;
    return replay_buffer;
}
/* Destroys a replay buffer created by gsr_replay_buffer_create or returned
 * by gsr_replay_buffer_clone. A clone shares its mutex with its original
 * (see the clone implementations), so only the original — the one with
 * original_replay_buffer == NULL — destroys the mutex. */
void gsr_replay_buffer_destroy(gsr_replay_buffer *self) {
    self->destroy(self);
    if(self->mutex_initialized && !self->original_replay_buffer) {
        pthread_mutex_destroy(&self->mutex);
        self->mutex_initialized = false;
    }
    self->original_replay_buffer = NULL;
    free(self);
}
/* Locks the buffer. A clone shares its mutex with the buffer it was cloned
 * from, so forward the lock to the owning (original) buffer. */
void gsr_replay_buffer_lock(gsr_replay_buffer *self) {
    gsr_replay_buffer *owner = self;
    while(owner->original_replay_buffer)
        owner = owner->original_replay_buffer;

    if(owner->mutex_initialized)
        pthread_mutex_lock(&owner->mutex);
}
/* Unlocks the buffer; mirror of gsr_replay_buffer_lock. */
void gsr_replay_buffer_unlock(gsr_replay_buffer *self) {
    gsr_replay_buffer *owner = self;
    while(owner->original_replay_buffer)
        owner = owner->original_replay_buffer;

    if(owner->mutex_initialized)
        pthread_mutex_unlock(&owner->mutex);
}
/* Appends |av_packet| with |timestamp| to the buffer (dispatches to the storage backend). */
bool gsr_replay_buffer_append(gsr_replay_buffer *self, const AVPacket *av_packet, double timestamp) {
    return self->append(self, av_packet, timestamp);
}
/* Removes all buffered packets (dispatches to the storage backend). */
void gsr_replay_buffer_clear(gsr_replay_buffer *self) {
    self->clear(self);
}
/* Returns the packet metadata at |iterator| (dispatches to the storage backend). */
AVPacket* gsr_replay_buffer_iterator_get_packet(gsr_replay_buffer *self, gsr_replay_buffer_iterator iterator) {
    return self->iterator_get_packet(self, iterator);
}
/* Returns the packet payload at |iterator| (dispatches to the storage backend).
   Ownership depends on the backend — the disk backend returns a malloc'd buffer
   the caller must free; check the backend for the RAM semantics. */
uint8_t* gsr_replay_buffer_iterator_get_packet_data(gsr_replay_buffer *self, gsr_replay_buffer_iterator iterator) {
    return self->iterator_get_packet_data(self, iterator);
}
/* Creates a read-oriented snapshot of the buffer (dispatches to the storage backend). */
gsr_replay_buffer* gsr_replay_buffer_clone(gsr_replay_buffer *self) {
    return self->clone(self);
}
/* Finds the iterator position of the packet roughly |seconds| before now
   (dispatches to the storage backend). */
gsr_replay_buffer_iterator gsr_replay_buffer_find_packet_index_by_time_passed(gsr_replay_buffer *self, int seconds) {
    return self->find_packet_index_by_time_passed(self, seconds);
}
/* Finds the next keyframe at or after |start_iterator| for the given stream
   (dispatches to the storage backend). */
gsr_replay_buffer_iterator gsr_replay_buffer_find_keyframe(gsr_replay_buffer *self, gsr_replay_buffer_iterator start_iterator, int stream_index, bool invert_stream_index) {
    return self->find_keyframe(self, start_iterator, stream_index, invert_stream_index);
}
/* Advances |iterator| to the next packet; returns false at the end of the buffer
   (dispatches to the storage backend). */
bool gsr_replay_buffer_iterator_next(gsr_replay_buffer *self, gsr_replay_buffer_iterator *iterator) {
    return self->iterator_next(self, iterator);
}

View File

@ -0,0 +1,437 @@
#include "../../include/replay_buffer/replay_buffer_disk.h"
#include "../../include/utils.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <assert.h>
/* Fixed: parenthesized the macro expansion so it stays a single value in any
   expression context. Size at which the current storage file is rotated. */
#define REPLAY_BUFFER_FILE_SIZE_BYTES (1024 * 1024 * 256) /* 256MB */
/* Filename prefix for the per-rotation storage files inside the replay directory */
#define FILE_PREFIX "Replay"
static void gsr_replay_buffer_disk_set_impl_funcs(gsr_replay_buffer_disk *self);
/* Records packet metadata for a packet whose payload is written to the
 * storage file at byte offset |data_index|. */
static void gsr_av_packet_disk_init(gsr_av_packet_disk *self, const AVPacket *av_packet, size_t data_index, double timestamp) {
    self->timestamp = timestamp;
    self->data_index = data_index;
    self->packet = *av_packet;
    self->packet.data = NULL; /* payload lives on disk, not in this struct */
}
/* Allocates a new file record and creates its backing storage file on disk.
 * On success *replay_storage_fd is the write fd of the new file and the
 * record starts with refcount 1. Returns NULL on failure.
 * Fixed: error messages previously named the wrong function
 * (gsr_av_packet_file_init); the storage file is now created 0600 instead
 * of 0700 — it is plain data and should not carry an executable bit. */
static gsr_replay_buffer_file* gsr_replay_buffer_file_create(char *replay_directory, size_t replay_storage_counter, double timestamp, int *replay_storage_fd) {
    gsr_replay_buffer_file *self = calloc(1, sizeof(gsr_replay_buffer_file));
    if(!self) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_file_create: failed to create buffer file\n");
        return NULL;
    }

    if(create_directory_recursive(replay_directory) != 0) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_file_create: failed to create replay directory: %s\n", replay_directory);
        free(self);
        return NULL;
    }

    char filename[PATH_MAX];
    snprintf(filename, sizeof(filename), "%s/%s_%d.gsr", replay_directory, FILE_PREFIX, (int)replay_storage_counter);
    *replay_storage_fd = creat(filename, 0600);
    if(*replay_storage_fd <= 0) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_file_create: failed to create replay file: %s\n", filename);
        free(self);
        return NULL;
    }

    self->id = replay_storage_counter;
    self->start_timestamp = timestamp;
    self->end_timestamp = timestamp;
    self->ref_counter = 1;
    self->fd = -1; /* read fd; opened lazily when packet data is requested */
    self->packets = NULL;
    self->capacity_num_packets = 0;
    self->num_packets = 0;
    return self;
}
/* Takes an additional reference on the file record.
 * Returns |self| so the call can be used inline. */
static gsr_replay_buffer_file* gsr_replay_buffer_file_ref(gsr_replay_buffer_file *self) {
    if(self->ref_counter >= 1)
        ++self->ref_counter;
    return self;
}
/* Frees a file record: closes the backing fd, deletes the on-disk storage
 * file and releases the packet metadata array. */
static void gsr_replay_buffer_file_free(gsr_replay_buffer_file *self, const char *replay_directory) {
    self->ref_counter = 0;

    if(self->fd > 0) {
        close(self->fd);
        self->fd = -1;
    }

    /* Delete the storage file that backed this record */
    char filepath[PATH_MAX];
    snprintf(filepath, sizeof(filepath), "%s/%s_%d.gsr", replay_directory, FILE_PREFIX, (int)self->id);
    remove(filepath);

    free(self->packets); /* free(NULL) is a no-op */
    self->packets = NULL;
    self->num_packets = 0;
    self->capacity_num_packets = 0;

    free(self);
}
/* Drops one reference; frees the record (and its on-disk file) when the
 * count reaches zero. */
static void gsr_replay_buffer_file_unref(gsr_replay_buffer_file *self, const char *replay_directory) {
    if(self->ref_counter > 0)
        --self->ref_counter;

    if(self->ref_counter <= 0)
        gsr_replay_buffer_file_free(self, replay_directory);
}
/* Backend clear: releases all buffer files and closes the active storage fd.
 * After this the buffer is empty and the next append starts a fresh file. */
static void gsr_replay_buffer_disk_clear(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    gsr_replay_buffer_lock(&self->replay_buffer);

    for(size_t i = 0; i < self->num_files; ++i)
        gsr_replay_buffer_file_unref(self->files[i], self->replay_directory);
    self->num_files = 0;

    if(self->storage_fd > 0) {
        close(self->storage_fd);
        self->storage_fd = 0;
    }
    self->storage_num_bytes_written = 0;

    gsr_replay_buffer_unlock(&self->replay_buffer);
}
/* Backend destroy: releases all files via clear and, if this instance
 * created the replay directory (clones never do), removes it.
 * NOTE(review): remove() only deletes an empty directory — this relies on
 * clear having deleted every storage file inside it first; confirm no other
 * files are ever placed there. */
static void gsr_replay_buffer_disk_destroy(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    gsr_replay_buffer_disk_clear(replay_buffer);
    if(self->owns_directory) {
        remove(self->replay_directory);
        self->owns_directory = false;
    }
}
/* Writes all |size| bytes of |data| to |fd|, retrying on short writes and on
 * EAGAIN/EINTR. Returns true when everything was written; on failure
 * *bytes_written_total still holds the number of bytes actually written.
 * Fixed: EINTR previously aborted the write, losing packet data whenever a
 * signal interrupted the call. */
static bool file_write_all(int fd, const uint8_t *data, size_t size, size_t *bytes_written_total) {
    *bytes_written_total = 0;
    while(*bytes_written_total < size) {
        const ssize_t bytes_written = write(fd, data + *bytes_written_total, size - *bytes_written_total);
        if(bytes_written == -1) {
            if(errno == EAGAIN || errno == EINTR)
                continue;
            return false;
        }
        *bytes_written_total += bytes_written;
    }
    return true;
}
/* Opens a new storage file and appends a matching file record. Fails when
 * the fixed file table is full or the file can't be created. */
static bool gsr_replay_buffer_disk_create_next_file(gsr_replay_buffer_disk *self, double timestamp) {
    if(self->num_files + 1 >= GSR_REPLAY_BUFFER_CAPACITY_NUM_FILES) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_disk_create_next_file: too many replay buffer files created! (> %d), either reduce the replay buffer time or report this as a bug\n", (int)GSR_REPLAY_BUFFER_CAPACITY_NUM_FILES);
        return false;
    }

    gsr_replay_buffer_file *new_file = gsr_replay_buffer_file_create(self->replay_directory, self->storage_counter, timestamp, &self->storage_fd);
    if(!new_file)
        return false;

    self->files[self->num_files] = new_file;
    ++self->num_files;
    ++self->storage_counter;
    return true;
}
/* Appends one packet to the newest file: records its metadata (growing the
 * packet array geometrically) and writes the payload to the storage fd.
 * Once the storage file reaches REPLAY_BUFFER_FILE_SIZE_BYTES it is closed
 * so the next append starts a new file. Caller must hold the buffer lock
 * and guarantee num_files >= 1. Returns false when the metadata array can't
 * grow or the payload write fails. */
static bool gsr_replay_buffer_disk_append_to_current_file(gsr_replay_buffer_disk *self, const AVPacket *av_packet, double timestamp) {
    gsr_replay_buffer_file *replay_buffer_file = self->files[self->num_files - 1];
    replay_buffer_file->end_timestamp = timestamp;

    if(replay_buffer_file->num_packets + 1 >= replay_buffer_file->capacity_num_packets) {
        /* Double the capacity, starting at 256 entries */
        size_t new_capacity_num_packets = replay_buffer_file->capacity_num_packets * 2;
        if(new_capacity_num_packets == 0)
            new_capacity_num_packets = 256;
        void *new_packets = realloc(replay_buffer_file->packets, new_capacity_num_packets * sizeof(gsr_av_packet_disk));
        if(!new_packets) {
            fprintf(stderr, "gsr error: gsr_replay_buffer_disk_append_to_current_file: failed to reallocate replay buffer file packets\n");
            return false;
        }
        replay_buffer_file->capacity_num_packets = new_capacity_num_packets;
        replay_buffer_file->packets = new_packets;
    }

    /* The payload goes at the current end of the storage file; record that
       offset so it can be seeked to when reading the packet back. */
    gsr_av_packet_disk *packet = &replay_buffer_file->packets[replay_buffer_file->num_packets];
    gsr_av_packet_disk_init(packet, av_packet, self->storage_num_bytes_written, timestamp);
    ++replay_buffer_file->num_packets;

    size_t bytes_written = 0;
    const bool file_written = file_write_all(self->storage_fd, av_packet->data, av_packet->size, &bytes_written);
    self->storage_num_bytes_written += bytes_written;

    /* Rotate: closing the fd makes the next append create a new file */
    if(self->storage_num_bytes_written >= REPLAY_BUFFER_FILE_SIZE_BYTES) {
        self->storage_num_bytes_written = 0;
        close(self->storage_fd);
        self->storage_fd = 0;
    }

    return file_written;
}
/* Drops the reference to the oldest file and shifts the remaining entries
 * down one slot. Caller must hold the buffer lock and ensure num_files > 0. */
static void gsr_replay_buffer_disk_remove_first_file(gsr_replay_buffer_disk *self) {
    gsr_replay_buffer_file_unref(self->files[0], self->replay_directory);
    --self->num_files;
    memmove(&self->files[0], &self->files[1], self->num_files * sizeof(self->files[0]));
}
/* Backend append: writes |av_packet| to disk, opening a new storage file when
 * none is active, and evicts the oldest file once the remaining files alone
 * cover the requested replay time. Returns false when the write failed. */
static bool gsr_replay_buffer_disk_append(gsr_replay_buffer *replay_buffer, const AVPacket *av_packet, double timestamp) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    bool success = false;
    gsr_replay_buffer_lock(&self->replay_buffer);

    /* storage_fd <= 0 either on first append or after a rotation closed it */
    if(self->storage_fd <= 0) {
        if(!gsr_replay_buffer_disk_create_next_file(self, timestamp))
            goto done;
    }

    const bool data_written = gsr_replay_buffer_disk_append_to_current_file(self, av_packet, timestamp);

    /* Keep the first (oldest) file until the files after it span the full
       replay_buffer_time on their own, then drop it. */
    if(self->num_files > 1) {
        const double buffer_time_accumulated = timestamp - self->files[1]->start_timestamp;
        if(buffer_time_accumulated >= self->replay_buffer_time)
            gsr_replay_buffer_disk_remove_first_file(self);
    }

    success = data_written;

    done:
    gsr_replay_buffer_unlock(&self->replay_buffer);
    return success;
}
/* Backend accessor: returns the packet metadata stored at |iterator|.
 * The pointer remains valid only while the owning file record is alive
 * (i.e. until it is evicted/cleared). */
static AVPacket* gsr_replay_buffer_disk_iterator_get_packet(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator iterator) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    assert(iterator.file_index < self->num_files);
    assert(iterator.packet_index < self->files[iterator.file_index]->num_packets);
    return &self->files[iterator.file_index]->packets[iterator.packet_index].packet;
}
/* Reads the payload of the packet at |iterator| back from its storage file,
 * lazily opening the file for reading on first access.
 * Returns a malloc'd buffer of packet->packet.size bytes that the caller
 * must free, or NULL on failure. */
static uint8_t* gsr_replay_buffer_disk_iterator_get_packet_data(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator iterator) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    assert(iterator.file_index < self->num_files);

    gsr_replay_buffer_file *file = self->files[iterator.file_index];
    assert(iterator.packet_index < file->num_packets);

    /* Lazily open the storage file the first time its data is needed */
    if(file->fd <= 0) {
        char filename[PATH_MAX];
        snprintf(filename, sizeof(filename), "%s/%s_%d.gsr", self->replay_directory, FILE_PREFIX, (int)file->id);
        file->fd = open(filename, O_RDONLY);
        if(file->fd <= 0) {
            fprintf(stderr, "gsr error: gsr_replay_buffer_disk_iterator_get_packet_data: failed to open file\n");
            return NULL;
        }
    }

    const gsr_av_packet_disk *packet = &file->packets[iterator.packet_index];
    if(lseek(file->fd, packet->data_index, SEEK_SET) == -1) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_disk_iterator_get_packet_data: failed to seek\n");
        return NULL;
    }

    /* Fixed: malloc result was previously passed to read() without a NULL check */
    uint8_t *packet_data = malloc(packet->packet.size);
    if(!packet_data) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_disk_iterator_get_packet_data: failed to allocate packet data\n");
        return NULL;
    }

    if(read(file->fd, packet_data, packet->packet.size) != packet->packet.size) {
        fprintf(stderr, "gsr error: gsr_replay_buffer_disk_iterator_get_packet_data: failed to read data from file\n");
        free(packet_data);
        return NULL;
    }

    return packet_data;
}
/* Creates a read-only snapshot of the buffer. The clone shares the original's
 * mutex (original_replay_buffer points back at it — see
 * gsr_replay_buffer_lock) and takes a reference on every file record so they
 * stay alive while the clone reads them. Returns NULL on allocation failure. */
static gsr_replay_buffer* gsr_replay_buffer_disk_clone(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    gsr_replay_buffer_disk *destination = calloc(1, sizeof(gsr_replay_buffer_disk));
    if(!destination)
        return NULL;

    gsr_replay_buffer_disk_set_impl_funcs(destination);
    gsr_replay_buffer_lock(&self->replay_buffer);

    destination->replay_buffer.original_replay_buffer = replay_buffer;
    destination->replay_buffer.mutex = self->replay_buffer.mutex;
    destination->replay_buffer.mutex_initialized = self->replay_buffer.mutex_initialized;
    destination->replay_buffer_time = self->replay_buffer_time;
    destination->storage_counter = self->storage_counter;
    destination->storage_num_bytes_written = self->storage_num_bytes_written;
    destination->storage_fd = 0; // We only want to read from the clone. If there is a need to write to it in the future then TODO change this

    for(size_t i = 0; i < self->num_files; ++i) {
        destination->files[i] = gsr_replay_buffer_file_ref(self->files[i]);
    }
    destination->num_files = self->num_files;

    snprintf(destination->replay_directory, sizeof(destination->replay_directory), "%s", self->replay_directory);
    /* Only the original removes the directory on destroy */
    destination->owns_directory = false;

    gsr_replay_buffer_unlock(&self->replay_buffer);
    return (gsr_replay_buffer*)destination;
}
/* Binary search */
static size_t gsr_replay_buffer_file_find_packet_index_by_time_passed(const gsr_replay_buffer_file *self, int seconds) {
const double now = clock_get_monotonic_seconds();
if(self->num_packets == 0) {
return 0;
}
size_t lower_bound = 0;
size_t upper_bound = self->num_packets;
size_t index = 0;
for(;;) {
index = lower_bound + (upper_bound - lower_bound) / 2;
const gsr_av_packet_disk *packet = &self->packets[index];
const double time_passed_since_packet = now - packet->timestamp;
if(time_passed_since_packet >= seconds) {
if(lower_bound == index)
break;
lower_bound = index;
} else {
if(upper_bound == index)
break;
upper_bound = index;
}
}
return index;
}
/* Binary search */
static gsr_replay_buffer_iterator gsr_replay_buffer_disk_find_file_index_by_time_passed(gsr_replay_buffer *replay_buffer, int seconds) {
gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
gsr_replay_buffer_lock(&self->replay_buffer);
const double now = clock_get_monotonic_seconds();
if(self->num_files == 0) {
gsr_replay_buffer_unlock(&self->replay_buffer);
return (gsr_replay_buffer_iterator){0, 0};
}
size_t lower_bound = 0;
size_t upper_bound = self->num_files;
size_t file_index = 0;
for(;;) {
file_index = lower_bound + (upper_bound - lower_bound) / 2;
const gsr_replay_buffer_file *file = self->files[file_index];
const double time_passed_since_file_start = now - file->start_timestamp;
const double time_passed_since_file_end = now - file->end_timestamp;
if(time_passed_since_file_start >= seconds && time_passed_since_file_end <= seconds) {
break;
} else if(time_passed_since_file_start >= seconds) {
if(lower_bound == file_index)
break;
lower_bound = file_index;
} else {
if(upper_bound == file_index)
break;
upper_bound = file_index;
}
}
const gsr_replay_buffer_file *file = self->files[file_index];
const size_t packet_index = gsr_replay_buffer_file_find_packet_index_by_time_passed(file, seconds);
gsr_replay_buffer_unlock(&self->replay_buffer);
return (gsr_replay_buffer_iterator){packet_index, file_index};
}
/* Backend keyframe search: scans forward from |start_iterator| for the next
 * packet flagged AV_PKT_FLAG_KEY whose stream matches |stream_index| (or
 * does NOT match it when invert_stream_index is set).
 * Returns packet_index == (size_t)-1 when no such keyframe exists. */
static gsr_replay_buffer_iterator gsr_replay_buffer_disk_find_keyframe(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator start_iterator, int stream_index, bool invert_stream_index) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    gsr_replay_buffer_iterator keyframe_iterator = {(size_t)-1, 0};
    gsr_replay_buffer_lock(&self->replay_buffer);

    /* Start at the given packet in the first file, then from packet 0 in
       every following file */
    size_t packet_index = start_iterator.packet_index;
    for(size_t file_index = start_iterator.file_index; file_index < self->num_files; ++file_index) {
        const gsr_replay_buffer_file *file = self->files[file_index];
        for(; packet_index < file->num_packets; ++packet_index) {
            const gsr_av_packet_disk *packet = &file->packets[packet_index];
            if((packet->packet.flags & AV_PKT_FLAG_KEY) && (invert_stream_index ? packet->packet.stream_index != stream_index : packet->packet.stream_index == stream_index)) {
                keyframe_iterator.packet_index = packet_index;
                keyframe_iterator.file_index = file_index;
                goto done;
            }
        }
        packet_index = 0;
    }

    done:
    gsr_replay_buffer_unlock(&self->replay_buffer);
    return keyframe_iterator;
}
/* Backend iterator step: advances to the next packet, crossing into the next
 * file when the current one is exhausted. Returns false at the end of the
 * buffer (or when the next file is empty). */
static bool gsr_replay_buffer_disk_iterator_next(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator *iterator) {
    gsr_replay_buffer_disk *self = (gsr_replay_buffer_disk*)replay_buffer;
    if(iterator->file_index >= self->num_files)
        return false;

    /* More packets left in the current file — just step forward */
    if(iterator->packet_index + 1 < self->files[iterator->file_index]->num_packets) {
        ++iterator->packet_index;
        return true;
    }

    /* Current file exhausted — move on only if a non-empty next file exists */
    if(iterator->file_index + 1 >= self->num_files)
        return false;
    if(self->files[iterator->file_index + 1]->num_packets == 0)
        return false;

    ++iterator->file_index;
    iterator->packet_index = 0;
    return true;
}
/* Formats the current local time as "YYYY-MM-DD_HH-MM-SS" into |time_str|.
 * On failure (localtime error or buffer too small) time_str is "".
 * Fixed: localtime() can return NULL and strftime() can fail — both were
 * previously unchecked; strftime's size already includes the terminating
 * NUL, so the "- 1" wasted a byte. */
static void get_current_time(char *time_str, size_t time_str_size) {
    if(time_str_size == 0)
        return;
    time_str[0] = '\0';

    const time_t now = time(NULL);
    const struct tm *t = localtime(&now);
    if(!t || strftime(time_str, time_str_size, "%Y-%m-%d_%H-%M-%S", t) == 0)
        time_str[0] = '\0';
}
/* Fills in the gsr_replay_buffer vtable with the disk implementations.
 * Note: find_packet_index_by_time_passed is backed by
 * gsr_replay_buffer_disk_find_file_index_by_time_passed, which resolves both
 * the file and the packet index despite its name. */
static void gsr_replay_buffer_disk_set_impl_funcs(gsr_replay_buffer_disk *self) {
    self->replay_buffer.destroy = gsr_replay_buffer_disk_destroy;
    self->replay_buffer.append = gsr_replay_buffer_disk_append;
    self->replay_buffer.clear = gsr_replay_buffer_disk_clear;
    self->replay_buffer.iterator_get_packet = gsr_replay_buffer_disk_iterator_get_packet;
    self->replay_buffer.iterator_get_packet_data = gsr_replay_buffer_disk_iterator_get_packet_data;
    self->replay_buffer.clone = gsr_replay_buffer_disk_clone;
    self->replay_buffer.find_packet_index_by_time_passed = gsr_replay_buffer_disk_find_file_index_by_time_passed;
    self->replay_buffer.find_keyframe = gsr_replay_buffer_disk_find_keyframe;
    self->replay_buffer.iterator_next = gsr_replay_buffer_disk_iterator_next;
}
/* Creates a disk-backed replay buffer that stores packets in a new
 * timestamped subdirectory of |replay_directory| (created lazily on first
 * append). Returns NULL on allocation failure.
 * NOTE(review): the subdirectory name ends in ".gsr" even though it is a
 * directory — presumably intentional for identification; confirm. */
gsr_replay_buffer* gsr_replay_buffer_disk_create(const char *replay_directory, double replay_buffer_time) {
    assert(replay_buffer_time > 0);
    gsr_replay_buffer_disk *replay_buffer = calloc(1, sizeof(gsr_replay_buffer_disk));
    if(!replay_buffer)
        return NULL;

    char time_str[128];
    get_current_time(time_str, sizeof(time_str));

    replay_buffer->num_files = 0;
    replay_buffer->storage_counter = 0;
    replay_buffer->replay_buffer_time = replay_buffer_time;
    snprintf(replay_buffer->replay_directory, sizeof(replay_buffer->replay_directory), "%s/gsr-replay-%s.gsr", replay_directory, time_str);
    /* The original instance owns (and on destroy removes) the directory;
       clones never do */
    replay_buffer->owns_directory = true;

    gsr_replay_buffer_disk_set_impl_funcs(replay_buffer);
    return (gsr_replay_buffer*)replay_buffer;
}

View File

@ -0,0 +1,256 @@
#include "../../include/replay_buffer/replay_buffer_ram.h"
#include "../../include/utils.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <libavutil/mem.h>
static void gsr_replay_buffer_ram_set_impl_funcs(gsr_replay_buffer_ram *self);
/* Allocates a refcounted deep copy of |av_packet| (metadata + payload).
 * Returns NULL on allocation failure. */
static gsr_av_packet_ram* gsr_av_packet_ram_create(const AVPacket *av_packet, double timestamp) {
    gsr_av_packet_ram *self = malloc(sizeof(gsr_av_packet_ram));
    if(!self)
        return NULL;

    self->ref_counter = 1;
    self->packet = *av_packet;
    self->timestamp = timestamp;
    // Why are we doing this you ask? there is a ffmpeg bug that causes cpu usage to increase over time when you have
    // packets that are not being free'd until later. So we copy the packet data, free the packet and then reconstruct
    // the packet later on when we need it, to keep packets alive only for a short period.
    self->packet.data = av_memdup(av_packet->data, av_packet->size);
    if(!self->packet.data) {
        free(self);
        return NULL;
    }

    return self;
}
/* Takes an additional reference on the packet copy. Returns |self| so the
 * call can be used inline. */
static gsr_av_packet_ram* gsr_av_packet_ram_ref(gsr_av_packet_ram *self) {
    if(self->ref_counter >= 1)
        ++self->ref_counter;
    return self;
}
/* Frees the packet copy: releases the av_memdup'd payload and the struct. */
static void gsr_av_packet_ram_free(gsr_av_packet_ram *self) {
    self->ref_counter = 0;

    if(self->packet.data) {
        av_free(self->packet.data);
        self->packet.data = NULL;
    }

    free(self);
}
/* Drops one reference; frees the packet copy when the count reaches zero. */
static void gsr_av_packet_ram_unref(gsr_av_packet_ram *self) {
    if(self->ref_counter >= 1)
        --self->ref_counter;

    if(self->ref_counter <= 0)
        gsr_av_packet_ram_free(self);
}
/* Backend destroy: unrefs every stored packet (under the lock) and frees the
 * packet pointer array. The gsr_replay_buffer base (mutex, struct memory) is
 * released by gsr_replay_buffer_destroy, which calls this first. */
static void gsr_replay_buffer_ram_destroy(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    gsr_replay_buffer_lock(&self->replay_buffer);
    for(size_t i = 0; i < self->num_packets; ++i) {
        if(self->packets[i]) {
            gsr_av_packet_ram_unref(self->packets[i]);
            self->packets[i] = NULL;
        }
    }
    self->num_packets = 0;
    gsr_replay_buffer_unlock(&self->replay_buffer);

    /* The array itself is freed outside the lock; no other thread should be
       using the buffer during destroy */
    if(self->packets) {
        free(self->packets);
        self->packets = NULL;
    }

    self->capacity_num_packets = 0;
    self->index = 0;
}
/* Copy |av_packet| into the ring buffer, overwriting the oldest slot when full.
   Returns false when the packet copy could not be allocated. */
static bool gsr_replay_buffer_ram_append(gsr_replay_buffer *replay_buffer, const AVPacket *av_packet, double timestamp) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    gsr_replay_buffer_lock(&self->replay_buffer);

    gsr_av_packet_ram *new_packet = gsr_av_packet_ram_create(av_packet, timestamp);
    if(!new_packet) {
        gsr_replay_buffer_unlock(&self->replay_buffer);
        return false;
    }

    /* Drop whatever previously occupied this slot (the oldest packet once the
       buffer has wrapped), then store the new packet there. */
    gsr_av_packet_ram *old_packet = self->packets[self->index];
    if(old_packet)
        gsr_av_packet_ram_unref(old_packet);
    self->packets[self->index] = new_packet;

    self->index = (self->index + 1) % self->capacity_num_packets;
    if(self->num_packets < self->capacity_num_packets)
        ++self->num_packets;

    gsr_replay_buffer_unlock(&self->replay_buffer);
    return true;
}
/* Drop all stored packets but keep the allocated slot array for reuse. */
static void gsr_replay_buffer_ram_clear(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    gsr_replay_buffer_lock(&self->replay_buffer);

    for(size_t slot = 0; slot < self->num_packets; ++slot) {
        gsr_av_packet_ram *packet = self->packets[slot];
        if(!packet)
            continue;
        gsr_av_packet_ram_unref(packet);
        self->packets[slot] = NULL;
    }

    self->num_packets = 0;
    self->index = 0;
    gsr_replay_buffer_unlock(&self->replay_buffer);
}
/* Translate a logical index (0 = oldest stored packet) into the ring buffer
   slot and return the packet stored there. Callers in this file take the
   replay buffer lock before calling. */
static gsr_av_packet_ram* gsr_replay_buffer_ram_get_packet_at_index(gsr_replay_buffer *replay_buffer, size_t index) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    assert(index < self->num_packets);
    size_t start_index = 0;
    /* Before the ring wraps, packets occupy slots [0, num_packets) and |index|
       normally equals |num_packets|, making this 0. Once full, |self->index|
       points at the next slot to overwrite, i.e. the oldest packet. */
    if(self->num_packets < self->capacity_num_packets)
        start_index = self->num_packets - self->index;
    else
        start_index = self->index;
    const size_t offset = (start_index + index) % self->capacity_num_packets;
    return self->packets[offset];
}
/* Resolve the iterator to its logical packet index and expose the AVPacket. */
static AVPacket* gsr_replay_buffer_ram_iterator_get_packet(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator iterator) {
    gsr_av_packet_ram *packet = gsr_replay_buffer_ram_get_packet_at_index(replay_buffer, iterator.packet_index);
    return &packet->packet;
}
/* RAM-backed packets keep their data inline in the AVPacket, so there is no
   separately-owned data blob to hand out; always returns NULL. */
static uint8_t* gsr_replay_buffer_ram_iterator_get_packet_data(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator iterator) {
    (void)replay_buffer;
    (void)iterator;
    uint8_t *no_data = NULL;
    return no_data;
}
/* Create a shallow clone that shares the packet references (each ref'd once
   more) and the source buffer's mutex. Returns NULL on allocation failure. */
static gsr_replay_buffer* gsr_replay_buffer_ram_clone(gsr_replay_buffer *replay_buffer) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    gsr_replay_buffer_ram *destination = calloc(1, sizeof(gsr_replay_buffer_ram));
    if(!destination)
        return NULL;
    gsr_replay_buffer_ram_set_impl_funcs(destination);
    gsr_replay_buffer_lock(&self->replay_buffer);
    /* The clone records its origin and reuses the original's mutex so both
       views stay synchronized on the same lock. */
    destination->replay_buffer.original_replay_buffer = replay_buffer;
    destination->replay_buffer.mutex = self->replay_buffer.mutex;
    destination->replay_buffer.mutex_initialized = self->replay_buffer.mutex_initialized;
    destination->capacity_num_packets = self->capacity_num_packets;
    destination->index = self->index;
    destination->packets = calloc(destination->capacity_num_packets, sizeof(gsr_av_packet_ram*));
    if(!destination->packets) {
        free(destination);
        gsr_replay_buffer_unlock(&self->replay_buffer);
        return NULL;
    }
    destination->num_packets = self->num_packets;
    /* Share (not copy) the packets: each side holds its own reference. */
    for(size_t i = 0; i < destination->num_packets; ++i) {
        destination->packets[i] = gsr_av_packet_ram_ref(self->packets[i]);
    }
    gsr_replay_buffer_unlock(&self->replay_buffer);
    return (gsr_replay_buffer*)destination;
}
/* Binary search */
/* Find the boundary packet for "recorded within the last |seconds| seconds",
   assuming packet timestamps increase with the logical index (append order).
   Returns packet_index 0 when the buffer is empty. */
static gsr_replay_buffer_iterator gsr_replay_buffer_ram_find_packet_index_by_time_passed(gsr_replay_buffer *replay_buffer, int seconds) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    gsr_replay_buffer_lock(&self->replay_buffer);

    const double now = clock_get_monotonic_seconds();
    if(self->num_packets == 0) {
        gsr_replay_buffer_unlock(&self->replay_buffer);
        return (gsr_replay_buffer_iterator){0, 0};
    }

    size_t lower_bound = 0;
    size_t upper_bound = self->num_packets;
    size_t index = 0;

    /* Classic bisection: older packets (larger time_passed) push the lower
       bound up, newer packets pull the upper bound down. Terminates when a
       bound stops moving; index always stays < num_packets. */
    for(;;) {
        index = lower_bound + (upper_bound - lower_bound) / 2;
        const gsr_av_packet_ram *packet = gsr_replay_buffer_ram_get_packet_at_index(replay_buffer, index);
        const double time_passed_since_packet = now - packet->timestamp;
        if(time_passed_since_packet >= seconds) {
            if(lower_bound == index)
                break;
            lower_bound = index;
        } else {
            if(upper_bound == index)
                break;
            upper_bound = index;
        }
    }

    gsr_replay_buffer_unlock(&self->replay_buffer);
    return (gsr_replay_buffer_iterator){index, 0};
}
/* Scan forward from |start_iterator| for the first keyframe packet belonging to
   |stream_index| (or, when |invert_stream_index| is set, to any OTHER stream).
   Returns (size_t)-1 as packet_index when no matching keyframe exists. */
static gsr_replay_buffer_iterator gsr_replay_buffer_ram_find_keyframe(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator start_iterator, int stream_index, bool invert_stream_index) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    size_t keyframe_index = (size_t)-1;
    gsr_replay_buffer_lock(&self->replay_buffer);
    for(size_t i = start_iterator.packet_index; i < self->num_packets; ++i) {
        const gsr_av_packet_ram *packet = gsr_replay_buffer_ram_get_packet_at_index(replay_buffer, i);
        if((packet->packet.flags & AV_PKT_FLAG_KEY) && (invert_stream_index ? packet->packet.stream_index != stream_index : packet->packet.stream_index == stream_index)) {
            keyframe_index = i;
            break;
        }
    }
    gsr_replay_buffer_unlock(&self->replay_buffer);
    return (gsr_replay_buffer_iterator){keyframe_index, 0};
}
/* Advance the iterator by one packet; returns false once the end is reached. */
static bool gsr_replay_buffer_ram_iterator_next(gsr_replay_buffer *replay_buffer, gsr_replay_buffer_iterator *iterator) {
    gsr_replay_buffer_ram *self = (gsr_replay_buffer_ram*)replay_buffer;
    const bool has_next = iterator->packet_index + 1 < self->num_packets;
    if(has_next)
        ++iterator->packet_index;
    return has_next;
}
/* Wire the base gsr_replay_buffer vtable to the RAM-backed implementation. */
static void gsr_replay_buffer_ram_set_impl_funcs(gsr_replay_buffer_ram *self) {
    self->replay_buffer.destroy = gsr_replay_buffer_ram_destroy;
    self->replay_buffer.append = gsr_replay_buffer_ram_append;
    self->replay_buffer.clear = gsr_replay_buffer_ram_clear;
    self->replay_buffer.iterator_get_packet = gsr_replay_buffer_ram_iterator_get_packet;
    self->replay_buffer.iterator_get_packet_data = gsr_replay_buffer_ram_iterator_get_packet_data;
    self->replay_buffer.clone = gsr_replay_buffer_ram_clone;
    self->replay_buffer.find_packet_index_by_time_passed = gsr_replay_buffer_ram_find_packet_index_by_time_passed;
    self->replay_buffer.find_keyframe = gsr_replay_buffer_ram_find_keyframe;
    self->replay_buffer.iterator_next = gsr_replay_buffer_ram_iterator_next;
}
/* Create a RAM-backed replay buffer holding at most |replay_buffer_num_packets|
   packets. Returns NULL on allocation failure.
   NOTE(review): the base mutex is not initialized here; presumably the shared
   gsr_replay_buffer layer does that — confirm. */
gsr_replay_buffer* gsr_replay_buffer_ram_create(size_t replay_buffer_num_packets) {
    assert(replay_buffer_num_packets > 0);
    gsr_replay_buffer_ram *replay_buffer = calloc(1, sizeof(gsr_replay_buffer_ram));
    if(!replay_buffer)
        return NULL;
    replay_buffer->capacity_num_packets = replay_buffer_num_packets;
    replay_buffer->num_packets = 0;
    replay_buffer->index = 0;
    replay_buffer->packets = calloc(replay_buffer->capacity_num_packets, sizeof(gsr_av_packet_ram*));
    if(!replay_buffer->packets) {
        /* Direct call (not through the vtable, which isn't wired up yet);
           destroy only releases the packet array, the struct is freed below. */
        gsr_replay_buffer_ram_destroy(&replay_buffer->replay_buffer);
        free(replay_buffer);
        return NULL;
    }
    gsr_replay_buffer_ram_set_impl_funcs(replay_buffer);
    return (gsr_replay_buffer*)replay_buffer;
}

147
src/shader.c Normal file
View File

@ -0,0 +1,147 @@
#include "../include/shader.h"
#include "../include/egl.h"
#include <stdio.h>
#include <assert.h>
static bool print_compile_errors = false;
/* Return the smaller of two ints. */
static int min_int(int a, int b) {
    if(a > b)
        return b;
    return a;
}
/* Compile one shader stage of the given |type| from |source|.
   Returns the GL shader object id, or 0 on failure (optionally logging the
   compile log when gsr_shader_enable_debug_output was called). */
static unsigned int load_shader(gsr_egl *egl, unsigned int type, const char *source) {
    const unsigned int shader_id = egl->glCreateShader(type);
    if(shader_id == 0) {
        fprintf(stderr, "gsr error: load_shader: failed to create shader, error: %d\n", egl->glGetError());
        return 0;
    }

    egl->glShaderSource(shader_id, 1, &source, NULL);
    egl->glCompileShader(shader_id);

    int compiled = 0;
    egl->glGetShaderiv(shader_id, GL_COMPILE_STATUS, &compiled);
    if(compiled)
        return shader_id;

    /* Compilation failed: optionally dump the (truncated) info log, then clean up. */
    int info_length = 0;
    egl->glGetShaderiv(shader_id, GL_INFO_LOG_LENGTH, &info_length);
    if(info_length > 1 && print_compile_errors) {
        char info_log[4096];
        egl->glGetShaderInfoLog(shader_id, min_int(4096, info_length), NULL, info_log);
        fprintf(stderr, "gsr error: load_shader: failed to compile shader, error:\n%s\nshader source:\n%s\n", info_log, source);
    }
    egl->glDeleteShader(shader_id);
    return 0;
}
/* Compile the given shader stages (either may be NULL, but not both) and link
   them into a GL program. Returns the program id, or 0 on failure. The shader
   objects are always released before returning. */
static unsigned int load_program(gsr_egl *egl, const char *vertex_shader, const char *fragment_shader) {
    unsigned int vertex_shader_id = 0;
    unsigned int fragment_shader_id = 0;
    unsigned int program_id = 0;
    int linked = 0;
    bool success = false;

    if(vertex_shader) {
        vertex_shader_id = load_shader(egl, GL_VERTEX_SHADER, vertex_shader);
        if(vertex_shader_id == 0)
            goto done;
    }

    if(fragment_shader) {
        fragment_shader_id = load_shader(egl, GL_FRAGMENT_SHADER, fragment_shader);
        if(fragment_shader_id == 0)
            goto done;
    }

    program_id = egl->glCreateProgram();
    if(program_id == 0) {
        fprintf(stderr, "gsr error: load_program: failed to create shader program, error: %d\n", egl->glGetError());
        goto done;
    }

    if(vertex_shader_id)
        egl->glAttachShader(program_id, vertex_shader_id);
    if(fragment_shader_id)
        egl->glAttachShader(program_id, fragment_shader_id);

    egl->glLinkProgram(program_id);
    egl->glGetProgramiv(program_id, GL_LINK_STATUS, &linked);
    if(!linked) {
        int info_length = 0;
        egl->glGetProgramiv(program_id, GL_INFO_LOG_LENGTH, &info_length);
        if(info_length > 1) {
            char info_log[4096];
            egl->glGetProgramInfoLog(program_id, min_int(4096, info_length), NULL, info_log);
            fprintf(stderr, "gsr error: load program: linking shader program failed, error:\n%s\n", info_log);
        }
        goto done;
    }

    success = true;

    done:
    /* goto-based cleanup: on failure delete the program AND zero the id so
       callers don't receive the id of a deleted program. Previously a link
       failure deleted the program but still returned its non-zero id, which
       gsr_shader_init treated as success. */
    if(!success) {
        if(program_id) {
            egl->glDeleteProgram(program_id);
            program_id = 0;
        }
    }
    /* The shader objects are no longer needed: a linked program keeps what it
       needs, and failed ones are deleted outright. */
    if(fragment_shader_id)
        egl->glDeleteShader(fragment_shader_id);
    if(vertex_shader_id)
        egl->glDeleteShader(vertex_shader_id);
    return program_id;
}
/* Compile+link a shader program from the given sources (either may be NULL,
   but not both). Returns 0 on success, -1 on failure. */
int gsr_shader_init(gsr_shader *self, gsr_egl *egl, const char *vertex_shader, const char *fragment_shader) {
    assert(egl);
    self->egl = egl;
    self->program_id = 0;

    const bool no_sources = !vertex_shader && !fragment_shader;
    if(no_sources) {
        fprintf(stderr, "gsr error: gsr_shader_init: vertex and fragment shader can't be NULL at the same time\n");
        return -1;
    }

    self->program_id = load_program(self->egl, vertex_shader, fragment_shader);
    return self->program_id == 0 ? -1 : 0;
}
/* Release the GL program and detach from the egl context. Safe to call on an
   already-deinitialized shader (self->egl == NULL). */
void gsr_shader_deinit(gsr_shader *self) {
    gsr_egl *egl = self->egl;
    if(!egl)
        return;

    const unsigned int program_id = self->program_id;
    self->program_id = 0;
    self->egl = NULL;

    if(program_id)
        egl->glDeleteProgram(program_id);
}
/* Bind |attribute| to |location| on the shader's program.
   Returns 0 (GL_NO_ERROR) on success, otherwise the GL error code. */
int gsr_shader_bind_attribute_location(gsr_shader *self, const char *attribute, int location) {
    /* Drain any stale GL errors first so the code returned below is ours. */
    while(self->egl->glGetError()) {}
    self->egl->glBindAttribLocation(self->program_id, location, attribute);
    return self->egl->glGetError();
}
/* Make this shader's program the current one on the GL context. */
void gsr_shader_use(gsr_shader *self) {
    gsr_egl *egl = self->egl;
    egl->glUseProgram(self->program_id);
}
/* Unbind any current shader program (glUseProgram(0)). */
void gsr_shader_use_none(gsr_shader *self) {
    gsr_egl *egl = self->egl;
    egl->glUseProgram(0);
}
/* Toggle logging of shader compile errors (read by load_shader). */
void gsr_shader_enable_debug_output(bool enable) {
    print_compile_errors = enable;
}

647
src/sound.cpp Normal file
View File

@ -0,0 +1,647 @@
#include "../include/sound.hpp"
extern "C" {
#include "../include/utils.h"
}
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <time.h>
#include <mutex>
#include <pulse/pulseaudio.h>
#include <pulse/mainloop.h>
#include <pulse/xmalloc.h>
#include <pulse/error.h>
#define RECONNECT_TRY_TIMEOUT_SECONDS 0.5
#define DEVICE_NAME_MAX_SIZE 128

/* Jump to |label| if the context or stream is no longer usable, reporting the
   pulse errno (or PA_ERR_BADSTATE) through |rerror| when it is non-NULL. */
#define CHECK_DEAD_GOTO(p, rerror, label) \
    do { \
        if (!(p)->context || !PA_CONTEXT_IS_GOOD(pa_context_get_state((p)->context)) || \
            !(p)->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state((p)->stream))) { \
            if (((p)->context && pa_context_get_state((p)->context) == PA_CONTEXT_FAILED) || \
                ((p)->stream && pa_stream_get_state((p)->stream) == PA_STREAM_FAILED)) { \
                if (rerror) \
                    *(rerror) = pa_context_errno((p)->context); \
            } else \
                if (rerror) \
                    *(rerror) = PA_ERR_BADSTATE; \
            goto label; \
        } \
    } while(false);

/* Which device the recording stream follows: a fixed device, or whatever the
   server's current default output/input is (triggering reconnects when the
   default changes — see subscribe_update_default_devices). */
enum class DeviceType {
    STANDARD,
    DEFAULT_OUTPUT,
    DEFAULT_INPUT
};

struct pa_handle {
    pa_context *context;
    pa_stream *stream;
    pa_mainloop *mainloop;

    /* Current pa_stream_peek window being consumed (read_index into read_data). */
    const void *read_data;
    size_t read_index, read_length;

    /* Fixed-size staging buffer handed to the consumer once completely filled. */
    uint8_t *output_data;
    size_t output_index, output_length;

    int operation_success;
    double latency_seconds;

    /* Stream parameters used when (re)connecting the record stream. */
    pa_buffer_attr attr;
    pa_sample_spec ss;

    /* Guards the reconnect fields below, which are also written from pulse
       subscription callbacks (subscribe_update_default_devices). */
    std::mutex reconnect_mutex;
    DeviceType device_type;
    char stream_name[256];
    bool reconnect;
    double reconnect_last_tried_seconds;
    char device_name[DEVICE_NAME_MAX_SIZE];
    char default_output_device_name[DEVICE_NAME_MAX_SIZE];
    char default_input_device_name[DEVICE_NAME_MAX_SIZE];
};
/* Tear down the stream, context and mainloop (in dependency order), release
   the staging buffer, then free the handle itself. */
static void pa_sound_device_free(pa_handle *p) {
    assert(p);

    /* The staging buffer is independent of the pulse objects. */
    free(p->output_data);
    p->output_data = NULL;

    /* Stream before context: the stream belongs to the context. */
    if(p->stream) {
        pa_stream_unref(p->stream);
        p->stream = NULL;
    }

    if(p->context) {
        pa_context_disconnect(p->context);
        pa_context_unref(p->context);
        p->context = NULL;
    }

    if(p->mainloop) {
        pa_mainloop_free(p->mainloop);
        p->mainloop = NULL;
    }

    pa_xfree(p);
}
/* pa_context_get_server_info callback (triggered by server-change events):
   refresh the cached default sink/source names and, when the device we follow
   changed, switch device_name and schedule a reconnect. */
static void subscribe_update_default_devices(pa_context*, const pa_server_info *server_info, void *userdata) {
    pa_handle *handle = (pa_handle*)userdata;
    std::lock_guard<std::mutex> lock(handle->reconnect_mutex);
    if(server_info->default_sink_name) {
        // TODO: Size check
        /* ".monitor" source of the default sink = audio playing on the default output. */
        snprintf(handle->default_output_device_name, sizeof(handle->default_output_device_name), "%s.monitor", server_info->default_sink_name);
        if(handle->device_type == DeviceType::DEFAULT_OUTPUT && strcmp(handle->device_name, handle->default_output_device_name) != 0) {
            handle->reconnect = true;
            handle->reconnect_last_tried_seconds = clock_get_monotonic_seconds();
            // TODO: Size check
            snprintf(handle->device_name, sizeof(handle->device_name), "%s", handle->default_output_device_name);
        }
    }

    if(server_info->default_source_name) {
        // TODO: Size check
        snprintf(handle->default_input_device_name, sizeof(handle->default_input_device_name), "%s", server_info->default_source_name);
        if(handle->device_type == DeviceType::DEFAULT_INPUT && strcmp(handle->device_name, handle->default_input_device_name) != 0) {
            handle->reconnect = true;
            handle->reconnect_last_tried_seconds = clock_get_monotonic_seconds();
            // TODO: Size check
            snprintf(handle->device_name, sizeof(handle->device_name), "%s", handle->default_input_device_name);
        }
    }
}
/* Subscription callback: on server-level events (e.g. default device changed),
   fetch the new server info asynchronously. */
static void subscribe_cb(pa_context *c, pa_subscription_event_type_t t, uint32_t idx, void *userdata) {
    (void)idx;
    pa_handle *handle = (pa_handle*)userdata;

    const bool is_server_event = (t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SERVER;
    if(!is_server_event)
        return;

    pa_operation *op = pa_context_get_server_info(c, subscribe_update_default_devices, handle);
    if(op)
        pa_operation_unref(op);
}
/* Startup-time server info callback: cache the current default devices on the
   handle (the default sink is recorded via its ".monitor" source). */
static void store_default_devices(pa_context*, const pa_server_info *server_info, void *userdata) {
    pa_handle *handle = (pa_handle*)userdata;
    const char *sink_name = server_info->default_sink_name;
    const char *source_name = server_info->default_source_name;
    if(sink_name)
        snprintf(handle->default_output_device_name, sizeof(handle->default_output_device_name), "%s.monitor", sink_name);
    if(source_name)
        snprintf(handle->default_input_device_name, sizeof(handle->default_input_device_name), "%s", source_name);
}
/* Synchronously query the server's default devices, then resolve |device_name|
   (the "default_output"/"default_input" aliases, or a literal pulse device
   name) into p->device_name and p->device_type. Returns false on failure. */
static bool startup_get_default_devices(pa_handle *p, const char *device_name) {
    pa_operation *pa = pa_context_get_server_info(p->context, store_default_devices, p);
    /* Pump the mainloop until the server-info request completes or is cancelled. */
    while(pa) {
        pa_operation_state state = pa_operation_get_state(pa);
        if(state == PA_OPERATION_DONE) {
            pa_operation_unref(pa);
            break;
        } else if(state == PA_OPERATION_CANCELLED) {
            pa_operation_unref(pa);
            return false;
        }
        pa_mainloop_iterate(p->mainloop, 1, NULL);
    }

    /* NOTE(review): only the default *output* is validated here, even when the
       caller asked for "default_input" — confirm whether an empty default
       input should also be treated as an error. */
    if(p->default_output_device_name[0] == '\0') {
        fprintf(stderr, "gsr error: failed to find default audio output device\n");
        return false;
    }

    if(strcmp(device_name, "default_output") == 0) {
        snprintf(p->device_name, sizeof(p->device_name), "%s", p->default_output_device_name);
        p->device_type = DeviceType::DEFAULT_OUTPUT;
    } else if(strcmp(device_name, "default_input") == 0) {
        snprintf(p->device_name, sizeof(p->device_name), "%s", p->default_input_device_name);
        p->device_type = DeviceType::DEFAULT_INPUT;
    } else {
        snprintf(p->device_name, sizeof(p->device_name), "%s", device_name);
        p->device_type = DeviceType::STANDARD;
    }

    return true;
}
/* Connect to PulseAudio and prepare a handle for recording from |device_name|
   ("default_output"/"default_input" follow the server's defaults; the actual
   record stream is created lazily in pa_sound_device_handle_reconnect).
   Returns NULL on failure, storing an error code in |rerror| when non-NULL. */
static pa_handle* pa_sound_device_new(const char *server,
    const char *name,
    const char *device_name,
    const char *stream_name,
    const pa_sample_spec *ss,
    const pa_buffer_attr *attr,
    int *rerror) {
    pa_handle *p;
    int error = PA_ERR_INTERNAL;
    pa_operation *pa = NULL;

    p = pa_xnew0(pa_handle, 1);
    p->attr = *attr;
    p->ss = *ss;
    snprintf(p->stream_name, sizeof(p->stream_name), "%s", stream_name);
    /* Force the first pa_sound_device_read to connect the stream right away.
       NOTE(review): RECONNECT_TRY_TIMEOUT_SECONDS is in seconds, so the
       "* 1000.0" looks like a ms/seconds mixup; harmless here since any offset
       >= the timeout makes the first reconnect attempt immediate. */
    p->reconnect = true;
    p->reconnect_last_tried_seconds = clock_get_monotonic_seconds() - (RECONNECT_TRY_TIMEOUT_SECONDS * 1000.0 * 2.0);
    p->default_output_device_name[0] = '\0';
    p->default_input_device_name[0] = '\0';
    p->device_type = DeviceType::STANDARD;

    /* Staging buffer: one period worth of audio (see sound_device_get_by_name). */
    const int buffer_size = attr->fragsize;
    void *buffer = malloc(buffer_size);
    if(!buffer) {
        fprintf(stderr, "gsr error: failed to allocate buffer for audio\n");
        /* Fix: previously |p| was leaked here and |rerror| was written without
           the NULL guard used on the fail path. */
        if(rerror)
            *rerror = -1;
        pa_xfree(p);
        return NULL;
    }
    p->output_data = (uint8_t*)buffer;
    p->output_length = buffer_size;
    p->output_index = 0;

    pa_proplist *proplist = pa_proplist_new();
    pa_proplist_sets(proplist, PA_PROP_MEDIA_ROLE, "production");
    if(strcmp(device_name, "") == 0) {
        /* Empty device name: create the stream without auto-connecting it. */
        pa_proplist_sets(proplist, "node.autoconnect", "false");
        pa_proplist_sets(proplist, "node.dont-reconnect", "true");
    }

    if (!(p->mainloop = pa_mainloop_new()))
        goto fail;

    if (!(p->context = pa_context_new_with_proplist(pa_mainloop_get_api(p->mainloop), name, proplist)))
        goto fail;

    if (pa_context_connect(p->context, server, PA_CONTEXT_NOFLAGS, NULL) < 0) {
        error = pa_context_errno(p->context);
        goto fail;
    }

    /* Pump the mainloop until the context is ready (or has died). */
    for (;;) {
        pa_context_state_t state = pa_context_get_state(p->context);
        if (state == PA_CONTEXT_READY)
            break;
        if (!PA_CONTEXT_IS_GOOD(state)) {
            error = pa_context_errno(p->context);
            goto fail;
        }
        pa_mainloop_iterate(p->mainloop, 1, NULL);
    }

    if(!startup_get_default_devices(p, device_name))
        goto fail;

    /* Subscribe to server events so default-device changes trigger reconnects. */
    pa_context_set_subscribe_callback(p->context, subscribe_cb, p);
    pa = pa_context_subscribe(p->context, PA_SUBSCRIPTION_MASK_SERVER, NULL, NULL);
    if(pa)
        pa_operation_unref(pa);

    pa_proplist_free(proplist);
    return p;

    fail:
    if (rerror)
        *rerror = error;
    pa_sound_device_free(p);
    pa_proplist_free(proplist);
    return NULL;
}
/* Decide whether the record stream should be (re)connected now. Rate-limited
   to once per RECONNECT_TRY_TIMEOUT_SECONDS; when it returns true, the target
   device name is copied into |device_name|. */
static bool pa_sound_device_should_reconnect(pa_handle *p, double now, char *device_name, size_t device_name_size) {
    std::lock_guard<std::mutex> lock(p->reconnect_mutex);
    /* A dead or missing stream also arms a reconnect, even without a
       default-device change from the subscription callback. */
    if(!p->reconnect && (!p->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state(p->stream)))) {
        p->reconnect = true;
        p->reconnect_last_tried_seconds = now;
    }

    if(p->reconnect && now - p->reconnect_last_tried_seconds >= RECONNECT_TRY_TIMEOUT_SECONDS) {
        p->reconnect_last_tried_seconds = now;
        // TODO: Size check
        snprintf(device_name, device_name_size, "%s", p->device_name);
        return true;
    }
    return false;
}
/* (Re)create and connect the record stream when a reconnect is due.
   Returns true when no reconnect was needed or it succeeded; false when the
   attempt failed (it will be retried, rate-limited, on a later call). */
static bool pa_sound_device_handle_reconnect(pa_handle *p, char *device_name, size_t device_name_size, double now) {
    int r;
    if(!pa_sound_device_should_reconnect(p, now, device_name, device_name_size))
        return true;

    /* Tear down the previous stream before creating the replacement. */
    if(p->stream) {
        pa_stream_disconnect(p->stream);
        pa_stream_unref(p->stream);
        p->stream = NULL;
    }

    if(!(p->stream = pa_stream_new(p->context, p->stream_name, &p->ss, NULL))) {
        //pa_context_errno(p->context);
        return false;
    }

    r = pa_stream_connect_record(p->stream, device_name, &p->attr,
        (pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_ADJUST_LATENCY|PA_STREAM_AUTO_TIMING_UPDATE));
    if(r < 0) {
        //pa_context_errno(p->context);
        return false;
    }

    pa_mainloop_iterate(p->mainloop, 0, NULL);
    /* Clear the flag only after a successful connect attempt. */
    std::lock_guard<std::mutex> lock(p->reconnect_mutex);
    p->reconnect = false;
    return true;
}
/* Block until the staging buffer (p->output_data, one period of audio) is full
   or |timeout_seconds| elapsed. Returns 0 when a full period is ready in
   p->output_data, -1 on timeout or stream failure. Also refreshes
   p->latency_seconds as a side effect. */
static int pa_sound_device_read(pa_handle *p, double timeout_seconds) {
    assert(p);

    const double start_time = clock_get_monotonic_seconds();

    char device_name[DEVICE_NAME_MAX_SIZE];
    bool success = false;
    int r = 0;
    int *rerror = &r;
    pa_usec_t latency = 0;
    int negative = 0;

    pa_mainloop_iterate(p->mainloop, 0, NULL);
    /* Rate-limited reconnect if the stream died or the followed default device changed. */
    if(!pa_sound_device_handle_reconnect(p, device_name, sizeof(device_name), start_time) || !p->stream)
        goto fail;

    if(pa_stream_get_state(p->stream) != PA_STREAM_READY)
        goto fail;

    CHECK_DEAD_GOTO(p, rerror, fail);

    while (p->output_index < p->output_length) {
        if(clock_get_monotonic_seconds() - start_time >= timeout_seconds)
            return -1;

        /* No peeked data left to consume: poll the mainloop briefly, then peek
           the next chunk from the stream. */
        if(!p->read_data) {
            pa_mainloop_prepare(p->mainloop, 1 * 1000); // 1 ms
            pa_mainloop_poll(p->mainloop);
            pa_mainloop_dispatch(p->mainloop);

            if(pa_stream_peek(p->stream, &p->read_data, &p->read_length) < 0)
                goto fail;

            /* NULL data + zero length: nothing buffered yet, keep polling. */
            if(!p->read_data && p->read_length == 0)
                continue;

            if(!p->read_data && p->read_length > 0) {
                // There is a hole in the stream :( drop it. Maybe we should generate silence instead? TODO
                if(pa_stream_drop(p->stream) != 0)
                    goto fail;
                continue;
            }

            if(p->read_length <= 0) {
                p->read_data = NULL;
                if(pa_stream_drop(p->stream) != 0)
                    goto fail;
                CHECK_DEAD_GOTO(p, rerror, fail);
                continue;
            }

            pa_operation_unref(pa_stream_update_timing_info(p->stream, NULL, NULL));
            // TODO: Deal with one pa_stream_peek not being enough. In that case we need to add multiple of these together(?)
            if(pa_stream_get_latency(p->stream, &latency, &negative) >= 0) {
                p->latency_seconds = negative ? -(double)latency : latency;
                if(p->latency_seconds < 0.0)
                    p->latency_seconds = 0.0;
                /* NOTE(review): |latency| is in microseconds (pa_usec_t), but this
                   factor is 1e-7 rather than 1e-6 — latency_seconds looks 10x too
                   small; confirm whether that is intentional. */
                p->latency_seconds *= 0.0000001;
            }
        }

        /* Copy as much of the peeked chunk as fits into the staging buffer. */
        const size_t space_free_in_output_buffer = p->output_length - p->output_index;
        if(space_free_in_output_buffer < p->read_length) {
            /* Chunk larger than remaining space: fill the buffer and keep the
               leftover peeked for the next call (stream not dropped yet). */
            memcpy(p->output_data + p->output_index, (const uint8_t*)p->read_data + p->read_index, space_free_in_output_buffer);
            p->output_index = 0;
            p->read_index += space_free_in_output_buffer;
            p->read_length -= space_free_in_output_buffer;
            break;
        } else {
            /* Chunk fully consumed: drop it from the stream. */
            memcpy(p->output_data + p->output_index, (const uint8_t*)p->read_data + p->read_index, p->read_length);
            p->output_index += p->read_length;
            p->read_data = NULL;
            p->read_length = 0;
            p->read_index = 0;

            if(pa_stream_drop(p->stream) != 0)
                goto fail;

            if(p->output_index == p->output_length) {
                p->output_index = 0;
                break;
            }
        }
    }

    success = true;

    fail:
    return success ? 0 : -1;
}
/* Map our sample format enum to the matching little-endian pulse format.
   Falls back to S16LE (after asserting in debug builds) for unknown values. */
static pa_sample_format_t audio_format_to_pulse_audio_format(AudioFormat audio_format) {
    switch(audio_format) {
        case S16: return PA_SAMPLE_S16LE;
        case S32: return PA_SAMPLE_S32LE;
        case F32: return PA_SAMPLE_FLOAT32LE;
    }
    assert(false);
    return PA_SAMPLE_S16LE;
}
/* Bytes per (mono) sample for each supported format: S16 -> 2, S32/F32 -> 4.
   Falls back to 2 (after asserting in debug builds) for unknown values. */
static int audio_format_to_get_bytes_per_sample(AudioFormat audio_format) {
    switch(audio_format) {
        case S16: return 2;
        case S32: return 4;
        case F32: return 4;
    }
    assert(false);
    return 2;
}
/* Open a 48kHz pulse recording stream on |device_name|. |period_frame_size|
   is the number of frames delivered per sound_device_read_next_chunk call.
   Returns 0 on success, -1 on failure. */
int sound_device_get_by_name(SoundDevice *device, const char *node_name, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
    pa_sample_spec ss;
    ss.format = audio_format_to_pulse_audio_format(audio_format);
    ss.rate = 48000;
    ss.channels = num_channels;

    pa_buffer_attr buffer_attr;
    /* fragsize/maxlength = exactly one period of audio; the (uint32_t)-1
       values request the server defaults for the remaining attributes. */
    buffer_attr.fragsize = period_frame_size * audio_format_to_get_bytes_per_sample(audio_format) * num_channels; // 2/4 bytes/sample, @num_channels channels
    buffer_attr.tlength = -1;
    buffer_attr.prebuf = -1;
    buffer_attr.minreq = -1;
    buffer_attr.maxlength = buffer_attr.fragsize;

    int error = 0;
    pa_handle *handle = pa_sound_device_new(nullptr, node_name, device_name, description, &ss, &buffer_attr, &error);
    if(!handle) {
        fprintf(stderr, "gsr error: pa_sound_device_new() failed: %s. Audio input device %s might not be valid\n", pa_strerror(error), device_name);
        return -1;
    }

    device->handle = handle;
    device->frames = period_frame_size;
    return 0;
}
/* Free the underlying pulse handle (if any) and mark the device closed. */
void sound_device_close(SoundDevice *device) {
    pa_handle *handle = (pa_handle*)device->handle;
    device->handle = NULL;
    if(handle)
        pa_sound_device_free(handle);
}
/* Read one period of audio. On success points |*buffer| at the handle's
   internal staging buffer (valid until the next read), stores the stream
   latency, and returns the number of frames; returns -1 on timeout/failure. */
int sound_device_read_next_chunk(SoundDevice *device, void **buffer, double timeout_sec, double *latency_seconds) {
    pa_handle *pa = (pa_handle*)device->handle;
    const int read_result = pa_sound_device_read(pa, timeout_sec);
    if(read_result < 0) {
        //fprintf(stderr, "pa_simple_read() failed: %s\n", pa_strerror(error));
        *latency_seconds = 0.0;
        return -1;
    }
    *buffer = pa->output_data;
    *latency_seconds = pa->latency_seconds;
    return device->frames;
}
/* Context state callback used by the polling loops below:
   sets the flag to 1 once ready, 2 on failure/termination. */
static void pa_state_cb(pa_context *c, void *userdata) {
    int *pa_ready = (int*)userdata;
    const pa_context_state state = pa_context_get_state(c);
    if(state == PA_CONTEXT_READY)
        *pa_ready = 1;
    else if(state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
        *pa_ready = 2;
    /* Transitional states (connecting, authorizing, ...) leave the flag untouched. */
}
/* Source-list callback: collect each source's name/description; eol > 0 marks
   the end of the enumeration. */
static void pa_sourcelist_cb(pa_context*, const pa_source_info *source_info, int eol, void *userdata) {
    if(eol > 0)
        return;
    AudioDevices *devices = (AudioDevices*)userdata;
    devices->audio_inputs.push_back({ source_info->name, source_info->description });
}
/* Server-info callback: record the default sink (as its ".monitor" source)
   and the default source on the AudioDevices result. */
static void pa_server_info_cb(pa_context*, const pa_server_info *server_info, void *userdata) {
    AudioDevices *devices = (AudioDevices*)userdata;
    const char *sink_name = server_info->default_sink_name;
    const char *source_name = server_info->default_source_name;
    if(sink_name)
        devices->default_output = std::string(sink_name) + ".monitor";
    if(source_name)
        devices->default_input = source_name;
}
/* Server-info callback: detect a PipeWire server by its reported name
   (e.g. "PulseAudio (on PipeWire ...)"). */
static void server_info_callback(pa_context*, const pa_server_info *server_info, void *userdata) {
    bool *is_server_pipewire = (bool*)userdata;
    const char *server_name = server_info->server_name;
    if(server_name && strstr(server_name, "PipeWire") != NULL)
        *is_server_pipewire = true;
}
/* Fill |audio_devices.default_output|/|default_input| by querying the pulse
   server over a short-lived private connection. Best-effort: leaves the fields
   untouched on any connection failure. */
static void get_pulseaudio_default_inputs(AudioDevices &audio_devices) {
    int state = 0;
    int pa_ready = 0;
    pa_operation *pa_op = NULL;

    pa_mainloop *main_loop = pa_mainloop_new();
    if(!main_loop)
        return;

    pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
    if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
        goto done;

    pa_context_set_state_callback(ctx, pa_state_cb, &pa_ready);

    /* Pump the mainloop: wait for the context to become ready (pa_ready == 1,
       set by pa_state_cb), issue the server-info request once, then wait for
       it to complete or for the connection to die (pa_ready == 2). */
    for(;;) {
        // Not ready
        if(pa_ready == 0) {
            pa_mainloop_iterate(main_loop, 1, NULL);
            continue;
        }

        switch(state) {
            case 0: {
                pa_op = pa_context_get_server_info(ctx, pa_server_info_cb, &audio_devices);
                ++state;
                break;
            }
        }

        // Couldn't get connection to the server
        if(pa_ready == 2 || (state == 1 && pa_op && pa_operation_get_state(pa_op) == PA_OPERATION_DONE))
            break;

        pa_mainloop_iterate(main_loop, 1, NULL);
    }

    done:
    if(pa_op)
        pa_operation_unref(pa_op);
    pa_context_disconnect(ctx);
    pa_context_unref(ctx);
    pa_mainloop_free(main_loop);
}
/* Enumerate all pulse sources plus the server's default input/output.
   Returns an empty AudioDevices on connection failure. */
AudioDevices get_pulseaudio_inputs() {
    AudioDevices audio_devices;
    int state = 0;
    int pa_ready = 0;
    pa_operation *pa_op = NULL;

    // TODO: Do this in the same connection below instead of two separate connections
    get_pulseaudio_default_inputs(audio_devices);

    pa_mainloop *main_loop = pa_mainloop_new();
    if(!main_loop)
        return audio_devices;

    pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
    if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
        goto done;

    pa_context_set_state_callback(ctx, pa_state_cb, &pa_ready);

    /* Same polling state machine as get_pulseaudio_default_inputs, but issuing
       a source-list request instead of a server-info request. */
    for(;;) {
        // Not ready
        if(pa_ready == 0) {
            pa_mainloop_iterate(main_loop, 1, NULL);
            continue;
        }

        switch(state) {
            case 0: {
                pa_op = pa_context_get_source_info_list(ctx, pa_sourcelist_cb, &audio_devices);
                ++state;
                break;
            }
        }

        // Couldn't get connection to the server
        if(pa_ready == 2 || (state == 1 && pa_op && pa_operation_get_state(pa_op) == PA_OPERATION_DONE))
            break;

        pa_mainloop_iterate(main_loop, 1, NULL);
    }

    done:
    if(pa_op)
        pa_operation_unref(pa_op);
    pa_context_disconnect(ctx);
    pa_context_unref(ctx);
    pa_mainloop_free(main_loop);
    return audio_devices;
}
/* Query the sound server's name to detect whether pulse is actually backed by
   PipeWire. Returns false on any connection failure. */
bool pulseaudio_server_is_pipewire() {
    int state = 0;
    int pa_ready = 0;
    pa_operation *pa_op = NULL;
    bool is_server_pipewire = false;

    pa_mainloop *main_loop = pa_mainloop_new();
    if(!main_loop)
        return is_server_pipewire;

    pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
    if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
        goto done;

    pa_context_set_state_callback(ctx, pa_state_cb, &pa_ready);

    /* Same polling state machine as the other one-shot queries in this file. */
    for(;;) {
        // Not ready
        if(pa_ready == 0) {
            pa_mainloop_iterate(main_loop, 1, NULL);
            continue;
        }

        switch(state) {
            case 0: {
                pa_op = pa_context_get_server_info(ctx, server_info_callback, &is_server_pipewire);
                ++state;
                break;
            }
        }

        // Couldn't get connection to the server
        if(pa_ready == 2 || (state == 1 && pa_op && pa_operation_get_state(pa_op) == PA_OPERATION_DONE))
            break;

        pa_mainloop_iterate(main_loop, 1, NULL);
    }

    done:
    if(pa_op)
        pa_operation_unref(pa_op);
    pa_context_disconnect(ctx);
    pa_context_unref(ctx);
    pa_mainloop_free(main_loop);
    return is_server_pipewire;
}

677
src/utils.c Normal file
View File

@ -0,0 +1,677 @@
#include "../include/utils.h"
#include "../include/window/window.h"
#include "../include/capture/capture.h"
#include <time.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/random.h>
#include <errno.h>
#include <assert.h>
#include <xf86drmMode.h>
#include <xf86drm.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xrandr.h>
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_vaapi.h>
#define DRM_NUM_BUF_ATTRS 4
/* Monotonic timestamp in seconds (nanosecond resolution); unaffected by
   wall-clock adjustments. */
double clock_get_monotonic_seconds(void) {
    struct timespec ts = {0};
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
}
/* Fill |buffer| with |buffer_size| random characters drawn from |alphabet|.
   Returns false if the kernel could not supply enough random bytes.
   TODO: Use other functions on other platforms than linux. */
bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
    if(getrandom(buffer, buffer_size, 0) < buffer_size) {
        fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
        return false;
    }

    /* Map each random byte onto the alphabet (slight modulo bias is acceptable here). */
    for(int i = 0; i < buffer_size; ++i) {
        const unsigned char random_byte = (unsigned char)buffer[i];
        buffer[i] = alphabet[random_byte % alphabet_size];
    }

    return true;
}
/* Convenience wrapper: fill |buffer| with random alphanumerics [A-Za-z0-9]. */
bool generate_random_characters_standard_alphabet(char *buffer, int buffer_size) {
    static const char alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    return generate_random_characters(buffer, buffer_size, alphabet, sizeof(alphabet) - 1);
}
/* Linear scan of the screen resources for the mode with the given id;
   NULL when not found. */
static const XRRModeInfo* get_mode_info(const XRRScreenResources *sr, RRMode id) {
    const XRRModeInfo *found = NULL;
    for(int i = 0; i < sr->nmode && !found; ++i) {
        if(sr->modes[i].id == id)
            found = &sr->modes[i];
    }
    return found;
}
/* Convert an XRandR rotation bit to our rotation enum; unknown values
   (including reflections) map to 0 degrees. */
static gsr_monitor_rotation x11_rotation_to_gsr_rotation(int rot) {
    switch(rot) {
        case RR_Rotate_0: return GSR_MONITOR_ROT_0;
        case RR_Rotate_90: return GSR_MONITOR_ROT_90;
        case RR_Rotate_180: return GSR_MONITOR_ROT_180;
        case RR_Rotate_270: return GSR_MONITOR_ROT_270;
    }
    return GSR_MONITOR_ROT_0;
}
/* Read the CONNECTOR_ID property of a RandR output (links the X11 output to a
   DRM connector). Returns 0 when the property is missing or has an unexpected
   type/format. */
static uint32_t x11_output_get_connector_id(Display *dpy, RROutput output, Atom randr_connector_id_atom) {
    Atom type = 0;
    int format = 0;
    unsigned long bytes_after = 0;
    unsigned long nitems = 0;
    unsigned char *prop = NULL;
    XRRGetOutputProperty(dpy, output, randr_connector_id_atom, 0, 128, false, false, AnyPropertyType, &type, &format, &nitems, &bytes_after, &prop);
    long result = 0;
    /* Properties with format == 32 are delivered by Xlib as C longs. */
    if(type == XA_INTEGER && format == 32)
        result = *(long*)prop;
    /* NOTE(review): Xlib-allocated memory is conventionally released with
       XFree(); plain free() works with typical allocators but XFree would be
       safer — confirm. */
    free(prop);
    return result;
}
/* Swap width/height for 90/270 degree rotations so the reported size matches
   the rotated output. */
static vec2i get_monitor_size_rotated(int width, int height, gsr_monitor_rotation rotation) {
    const bool swap_dimensions = rotation == GSR_MONITOR_ROT_90 || rotation == GSR_MONITOR_ROT_270;
    if(swap_dimensions)
        return (vec2i){ .x = height, .y = width };
    return (vec2i){ .x = width, .y = height };
}
/* Enumerate all connected, active XRandR outputs and invoke |callback| with a
   gsr_monitor describing each one. The gsr_monitor (and its name pointer) is
   only valid for the duration of the callback. */
void for_each_active_monitor_output_x11_not_cached(Display *display, active_monitor_callback callback, void *userdata) {
    XRRScreenResources *screen_res = XRRGetScreenResources(display, DefaultRootWindow(display));
    if(!screen_res)
        return;

    const Atom randr_connector_id_atom = XInternAtom(display, "CONNECTOR_ID", False);

    char display_name[256];
    for(int i = 0; i < screen_res->noutput; ++i) {
        XRROutputInfo *out_info = XRRGetOutputInfo(display, screen_res, screen_res->outputs[i]);
        if(out_info && out_info->crtc && out_info->connection == RR_Connected) {
            XRRCrtcInfo *crt_info = XRRGetCrtcInfo(display, screen_res, out_info->crtc);
            if(crt_info && crt_info->mode) {
                // We want to use the current mode info width/height (mode_info->width/height) instead of crtc info width/height (crt_info->width/height) because crtc info
                // is scaled if the monitor is scaled (xrandr --output DP-1 --scale 1.5). Normally this is not an issue for x11 applications,
                // but gpu screen recorder captures the drm framebuffer instead of x11 api. This drm framebuffer which doesn't increase in size when using xrandr scaling.
                // Maybe a better option would be to get the drm crtc size instead.
                const XRRModeInfo *mode_info = get_mode_info(screen_res, crt_info->mode);
                if(mode_info) {
                    snprintf(display_name, sizeof(display_name), "%.*s", (int)out_info->nameLen, out_info->name);
                    const gsr_monitor_rotation rotation = x11_rotation_to_gsr_rotation(crt_info->rotation);
                    const vec2i monitor_size = get_monitor_size_rotated(mode_info->width, mode_info->height, rotation);

                    const gsr_monitor monitor = {
                        .name = display_name,
                        .name_len = out_info->nameLen,
                        .pos = { .x = crt_info->x, .y = crt_info->y },
                        .size = monitor_size,
                        .connector_id = x11_output_get_connector_id(display, screen_res->outputs[i], randr_connector_id_atom),
                        .rotation = rotation,
                        .monitor_identifier = out_info->crtc
                    };
                    callback(&monitor, userdata);
                }
            }
            if(crt_info)
                XRRFreeCrtcInfo(crt_info);
        }
        if(out_info)
            XRRFreeOutputInfo(out_info);
    }

    XRRFreeScreenResources(screen_res);
}
/* TODO: Support more connector types */
/* Maps a display-server output name prefix (e.g. "HDMI-1") to an internal
   connector type index. Returns -1 for unrecognized prefixes. */
int get_connector_type_by_name(const char *name) {
    static const struct {
        const char *prefix;
        int type;
    } connector_prefixes[] = {
        { "HDMI-",        1 },
        { "DP-",          2 },
        { "DisplayPort-", 3 },
        { "eDP-",         4 },
        { "DVI-",         5 },
    };
    for(size_t i = 0; i < sizeof(connector_prefixes)/sizeof(connector_prefixes[0]); ++i) {
        const size_t prefix_len = strlen(connector_prefixes[i].prefix);
        if(strncmp(name, connector_prefixes[i].prefix, prefix_len) == 0)
            return connector_prefixes[i].type;
    }
    return -1;
}
/* Extracts the trailing numeric id from an output name, e.g. "DP-1" -> 1,
   "HDMI-12" -> 12. Returns -1 when the name has no trailing digits. */
int get_connector_type_id_by_name(const char *name) {
    const char *end = name + strlen(name);
    const char *digits = end;
    /* walk backwards over the trailing digit run */
    while(digits > name && digits[-1] >= '0' && digits[-1] <= '9')
        --digits;
    if(digits == end)
        return -1;
    return atoi(digits);
}
/* Packs a connector type index (high 16 bits) and per-type count (low 16 bits)
   into a single monitor identifier.
   NOTE(review): values above 16 bits would overlap the other half — assumed
   small by callers; confirm. */
uint32_t monitor_identifier_from_type_and_count(int monitor_type_index, int monitor_type_count) {
    const uint32_t high = (uint32_t)monitor_type_index << 16;
    const uint32_t low = (uint32_t)monitor_type_count;
    return high | low;
}
/* Looks up a DRM connector property by name. On success stores its value in
   |result| and returns true; returns false when the property is not found. */
static bool connector_get_property_by_name(int drmfd, drmModeConnectorPtr props, const char *name, uint64_t *result) {
    for(int prop_index = 0; prop_index < props->count_props; ++prop_index) {
        drmModePropertyPtr prop = drmModeGetProperty(drmfd, props->props[prop_index]);
        if(!prop)
            continue;
        const bool name_matches = strcmp(name, prop->name) == 0;
        drmModeFreeProperty(prop);
        if(name_matches) {
            *result = props->prop_values[prop_index];
            return true;
        }
    }
    return false;
}
/* Enumerates connected DRM connectors with an active crtc, invoking |callback|
   once per monitor. Rotation is not available at the DRM level, so it is always
   reported as GSR_MONITOR_ROT_0.
   Bug fix: the original called get_connector_type_by_name(display_name) BEFORE
   snprintf wrote display_name, reading an uninitialized (first iteration) or
   stale (later iterations) buffer. The name is now formatted first. */
static void for_each_active_monitor_output_drm(const char *card_path, active_monitor_callback callback, void *userdata) {
    int fd = open(card_path, O_RDONLY);
    if(fd == -1) {
        fprintf(stderr, "gsr error: for_each_active_monitor_output_drm failed, failed to open \"%s\", error: %s\n", card_path, strerror(errno));
        return;
    }

    drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

    char display_name[256];
    drmModeResPtr resources = drmModeGetResources(fd);
    if(resources) {
        for(int i = 0; i < resources->count_connectors; ++i) {
            drmModeConnectorPtr connector = drmModeGetConnectorCurrent(fd, resources->connectors[i]);
            if(!connector)
                continue;

            if(connector->connection != DRM_MODE_CONNECTED) {
                drmModeFreeConnector(connector);
                continue;
            }

            uint64_t crtc_id = 0;
            connector_get_property_by_name(fd, connector, "CRTC_ID", &crtc_id);

            drmModeCrtcPtr crtc = drmModeGetCrtc(fd, crtc_id);
            const char *connection_name = drmModeGetConnectorTypeName(connector->connector_type);
            if(connection_name && crtc_id > 0 && crtc) {
                /* Format the name first — the connector type lookup below parses it */
                const int display_name_len = snprintf(display_name, sizeof(display_name), "%s-%u", connection_name, connector->connector_type_id);
                const int connector_type_index_name = get_connector_type_by_name(display_name);
                const gsr_monitor monitor = {
                    .name = display_name,
                    .name_len = display_name_len,
                    .pos = { .x = crtc->x, .y = crtc->y },
                    .size = { .x = (int)crtc->width, .y = (int)crtc->height },
                    .connector_id = connector->connector_id,
                    .rotation = GSR_MONITOR_ROT_0,
                    .monitor_identifier = connector_type_index_name != -1 ? monitor_identifier_from_type_and_count(connector_type_index_name, connector->connector_type_id) : 0
                };
                callback(&monitor, userdata);
            }

            if(crtc)
                drmModeFreeCrtc(crtc);
            drmModeFreeConnector(connector);
        }
        drmModeFreeResources(resources);
    }

    close(fd);
}
/* Dispatches monitor enumeration to the backend matching |connection_type|:
   DRM scans the card directly, X11/Wayland use the window's cached output list. */
void for_each_active_monitor_output(const gsr_window *window, const char *card_path, gsr_connection_type connection_type, active_monitor_callback callback, void *userdata) {
    switch(connection_type) {
        case GSR_CONNECTION_DRM:
            for_each_active_monitor_output_drm(card_path, callback, userdata);
            break;
        case GSR_CONNECTION_X11:
        case GSR_CONNECTION_WAYLAND:
            gsr_window_for_each_active_monitor_output_cached(window, callback, userdata);
            break;
    }
}
/* Enumeration callback: copies the first monitor whose name matches
   data->name into data->monitor (name/name_len are not copied — the source
   name points at a temporary buffer). */
static void get_monitor_by_name_callback(const gsr_monitor *monitor, void *userdata) {
    get_monitor_by_name_userdata *data = userdata;
    if(data->found_monitor)
        return;
    if(strcmp(data->name, monitor->name) != 0)
        return;

    gsr_monitor *out = data->monitor;
    out->pos = monitor->pos;
    out->size = monitor->size;
    out->connector_id = monitor->connector_id;
    out->rotation = monitor->rotation;
    out->monitor_identifier = monitor->monitor_identifier;
    data->found_monitor = true;
}
/* Finds a monitor by its output name (e.g. "DP-1") and fills |monitor| with its
   data. Returns false when no monitor with that name exists. */
bool get_monitor_by_name(const gsr_egl *egl, gsr_connection_type connection_type, const char *name, gsr_monitor *monitor) {
    get_monitor_by_name_userdata userdata = {
        .name = name,
        .name_len = strlen(name),
        .monitor = monitor,
        .found_monitor = false
    };
    for_each_active_monitor_output(egl->window, egl->card_path, connection_type, get_monitor_by_name_callback, &userdata);
    return userdata.found_monitor;
}
/* Scratch state for display-server-side monitor lookups (by connector id or,
   on Wayland, by name). Filled in by the callbacks below. */
typedef struct {
    const gsr_monitor *monitor;     /* the DRM-side monitor we are trying to match */
    gsr_monitor_rotation rotation;  /* rotation reported by the display server */
    vec2i position;                 /* position reported by the display server */
    bool match_found;
} get_monitor_by_connector_id_userdata;
/* Wayland lookup callback: matches by output name. Both names must be non-NULL
   for a comparison to take place. */
static void get_monitor_by_name_wayland_callback(const gsr_monitor *monitor, void *userdata) {
    get_monitor_by_connector_id_userdata *data = userdata;
    if(!monitor->name || !data->monitor->name)
        return;
    if(strcmp(monitor->name, data->monitor->name) != 0)
        return;
    data->rotation = monitor->rotation;
    data->position = monitor->pos;
    data->match_found = true;
}
/* Lookup callback: matches by DRM connector id, falling back to the packed
   monitor identifier when the display server didn't expose a connector id. */
static void get_monitor_by_connector_id_callback(const gsr_monitor *monitor, void *userdata) {
    get_monitor_by_connector_id_userdata *data = userdata;
    const bool connector_id_matches = monitor->connector_id == data->monitor->connector_id;
    const bool identifier_matches = !monitor->connector_id && monitor->monitor_identifier == data->monitor->monitor_identifier;
    if(!connector_id_matches && !identifier_matches)
        return;
    data->rotation = monitor->rotation;
    data->position = monitor->pos;
    data->match_found = true;
}
/* Resolves the display-server-side rotation and position of a DRM monitor.
   On Wayland, first tries matching by output name (more reliable there), then
   falls back to connector-id matching; on X11 only the connector-id path runs.
   Outputs default to rotation 0 / position (0,0) when no match is found.
   Refactor: the wayland fallback path and the non-wayland path were exact
   duplicates — merged into a single connector-id lookup. */
bool drm_monitor_get_display_server_data(const gsr_window *window, const gsr_monitor *monitor, gsr_monitor_rotation *monitor_rotation, vec2i *monitor_position) {
    *monitor_rotation = GSR_MONITOR_ROT_0;
    *monitor_position = (vec2i){0, 0};

    get_monitor_by_connector_id_userdata userdata = {
        .monitor = monitor,
        .rotation = GSR_MONITOR_ROT_0,
        .position = (vec2i){0, 0},
        .match_found = false
    };

    if(gsr_window_get_display_server(window) == GSR_DISPLAY_SERVER_WAYLAND) {
        gsr_window_for_each_active_monitor_output_cached(window, get_monitor_by_name_wayland_callback, &userdata);
        if(userdata.match_found) {
            *monitor_rotation = userdata.rotation;
            *monitor_position = userdata.position;
            return true;
        }
        /* reset before the fallback lookup below */
        userdata.rotation = GSR_MONITOR_ROT_0;
        userdata.position = (vec2i){0, 0};
        userdata.match_found = false;
    }

    gsr_window_for_each_active_monitor_output_cached(window, get_monitor_by_connector_id_callback, &userdata);
    *monitor_rotation = userdata.rotation;
    *monitor_position = userdata.position;
    return userdata.match_found;
}
/* Fills |info| with the GPU vendor (and, for NVIDIA, the major driver version
   parsed from GL_RENDERER) by inspecting the OpenGL vendor/renderer strings.
   Returns false when the vendor is unknown or a software renderer is detected. */
bool gl_get_gpu_info(gsr_egl *egl, gsr_gpu_info *info) {
    const char *software_renderers[] = { "llvmpipe", "SWR", "softpipe", NULL };
    bool supported = true;
    const unsigned char *gl_vendor = egl->glGetString(GL_VENDOR);
    const unsigned char *gl_renderer = egl->glGetString(GL_RENDERER);

    info->gpu_version = 0;
    info->is_steam_deck = false;

    if(!gl_vendor) {
        fprintf(stderr, "gsr error: failed to get gpu vendor\n");
        supported = false;
        goto end;
    }

    /* Reject software renderers — capturing through them defeats the purpose */
    if(gl_renderer) {
        for(int i = 0; software_renderers[i]; ++i) {
            if(strstr((const char*)gl_renderer, software_renderers[i])) {
                fprintf(stderr, "gsr error: your opengl environment is not properly setup. It's using %s (software rendering) for opengl instead of your graphics card. Please make sure your graphics driver is properly installed\n", software_renderers[i]);
                supported = false;
                goto end;
            }
        }
    }

    /* Mesa reports "Mesa" as the vendor; fall back to the renderer string for AMD */
    if(strstr((const char*)gl_vendor, "AMD"))
        info->vendor = GSR_GPU_VENDOR_AMD;
    else if(strstr((const char*)gl_vendor, "Mesa") && gl_renderer && strstr((const char*)gl_renderer, "AMD"))
        info->vendor = GSR_GPU_VENDOR_AMD;
    else if(strstr((const char*)gl_vendor, "Intel"))
        info->vendor = GSR_GPU_VENDOR_INTEL;
    else if(strstr((const char*)gl_vendor, "NVIDIA"))
        info->vendor = GSR_GPU_VENDOR_NVIDIA;
    else if(strstr((const char*)gl_vendor, "Broadcom"))
        info->vendor = GSR_GPU_VENDOR_BROADCOM;
    else {
        fprintf(stderr, "gsr error: unknown gpu vendor: %s\n", gl_vendor);
        supported = false;
        goto end;
    }

    if(gl_renderer) {
        /* NVIDIA renderer string looks like "NVIDIA GeForce ... /PCIe/SSE2 <version>";
           skip three words and read the 4th as the driver version */
        if(info->vendor == GSR_GPU_VENDOR_NVIDIA)
            sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &info->gpu_version);

        /* "vangogh" is the Steam Deck APU codename */
        info->is_steam_deck = strstr((const char*)gl_renderer, "vangogh") != NULL;
    }

    end:
    return supported;
}
/* Returns true if the DRM card at |card_path| has at least one plane with an
   attached framebuffer (i.e. it is actually scanning out a screen). nouveau is
   rejected outright. */
bool try_card_has_valid_plane(const char *card_path) {
    bool has_active_plane = false;

    const int fd = open(card_path, O_RDONLY);
    if(fd == -1)
        return false;

    drmVersion *version = drmGetVersion(fd);
    if(version && !strstr(version->name, "nouveau")) {
        drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
        drmModePlaneResPtr plane_resources = drmModeGetPlaneResources(fd);
        if(plane_resources) {
            for(uint32_t plane_index = 0; plane_index < plane_resources->count_planes && !has_active_plane; ++plane_index) {
                drmModePlanePtr plane = drmModeGetPlane(fd, plane_resources->planes[plane_index]);
                if(!plane)
                    continue;
                if(plane->fb_id)
                    has_active_plane = true;
                drmModeFreePlane(plane);
            }
            drmModeFreePlaneResources(plane_resources);
        }
    }

    if(version)
        drmFreeVersion(version);
    close(fd);
    return has_active_plane;
}
/* Writes a usable DRM card path into |output| (caller provides >= 128 bytes).
   Prefers the card the EGL device maps to; otherwise probes /dev/dri/card0..9.
   For monitor capture the card must additionally have an active plane. */
bool gsr_get_valid_card_path(gsr_egl *egl, char *output, bool is_monitor_capture) {
    if(egl->dri_card_path) {
        snprintf(output, 128, "%s", egl->dri_card_path);
        if(!is_monitor_capture)
            return true;
        return try_card_has_valid_plane(output);
    }

    for(int card_index = 0; card_index < 10; ++card_index) {
        snprintf(output, 128, DRM_DEV_NAME, DRM_DIR_NAME, card_index);
        if(try_card_has_valid_plane(output))
            return true;
    }
    return false;
}
/* Resolves the render node (e.g. /dev/dri/renderD128) for a DRM card path.
   Writes at most 128 bytes into |render_path|. Returns false on failure. */
bool gsr_card_path_get_render_path(const char *card_path, char *render_path) {
    bool success = false;

    const int fd = open(card_path, O_RDONLY);
    if(fd == -1)
        return false;

    char *device_name = drmGetRenderDeviceNameFromFd(fd);
    if(device_name) {
        snprintf(render_path, 128, "%s", device_name);
        free(device_name); /* libdrm allocates the name; we own it */
        success = true;
    }

    close(fd);
    return success;
}
/* Creates |path| and all missing parent directories (mkdir -p) with mode 0700.
   Temporarily writes '\0' into |path| at each separator, so the buffer must be
   writable; it is restored before returning. Returns 0 on success (existing
   directories are fine), -1 on the first hard mkdir failure. */
int create_directory_recursive(char *path) {
    char *end = path + strlen(path);
    char *search_start = path;
    for(;;) {
        char *separator = strchr(search_start, '/');
        /* Skips first '/', we don't want to try and create the root directory */
        if(separator == path) {
            ++search_start;
            continue;
        }

        if(!separator)
            separator = end;

        /* truncate at the separator, mkdir the prefix, then restore */
        const char saved_char = *separator;
        *separator = '\0';
        const int mkdir_result = mkdir(path, S_IRWXU);
        *separator = saved_char;

        if(mkdir_result == -1 && errno != EEXIST)
            return mkdir_result;

        if(separator == end)
            return 0;
        search_start = separator + 1;
    }
}
/* Builds an EGL_EXT_image_dma_buf_import attribute list for eglCreateImage in
   |img_attr|. Emits the fourcc/width/height header, then per-plane fd, offset
   and pitch attributes (plus lo/hi modifier halves when |use_modifier| is set),
   terminated with EGL_NONE.
   Attribute count: 6 header + 6 per plane (10 with modifiers) + 1 terminator.
   NOTE(review): the final assert allows up to 44 entries, which covers 3 planes
   with modifiers (37) or 4 without (31) — 4 planes WITH modifiers would need 47
   and trip the assert; confirm callers never combine both. */
void setup_dma_buf_attrs(intptr_t *img_attr, uint32_t format, uint32_t width, uint32_t height, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes, bool use_modifier) {
    const uint32_t plane_fd_attrs[DRM_NUM_BUF_ATTRS] = {
        EGL_DMA_BUF_PLANE0_FD_EXT,
        EGL_DMA_BUF_PLANE1_FD_EXT,
        EGL_DMA_BUF_PLANE2_FD_EXT,
        EGL_DMA_BUF_PLANE3_FD_EXT
    };

    const uint32_t plane_offset_attrs[DRM_NUM_BUF_ATTRS] = {
        EGL_DMA_BUF_PLANE0_OFFSET_EXT,
        EGL_DMA_BUF_PLANE1_OFFSET_EXT,
        EGL_DMA_BUF_PLANE2_OFFSET_EXT,
        EGL_DMA_BUF_PLANE3_OFFSET_EXT
    };

    const uint32_t plane_pitch_attrs[DRM_NUM_BUF_ATTRS] = {
        EGL_DMA_BUF_PLANE0_PITCH_EXT,
        EGL_DMA_BUF_PLANE1_PITCH_EXT,
        EGL_DMA_BUF_PLANE2_PITCH_EXT,
        EGL_DMA_BUF_PLANE3_PITCH_EXT
    };

    const uint32_t plane_modifier_lo_attrs[DRM_NUM_BUF_ATTRS] = {
        EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT,
        EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT,
        EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT,
        EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT
    };

    const uint32_t plane_modifier_hi_attrs[DRM_NUM_BUF_ATTRS] = {
        EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT,
        EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT,
        EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT,
        EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT
    };

    size_t img_attr_index = 0;

    img_attr[img_attr_index++] = EGL_LINUX_DRM_FOURCC_EXT;
    img_attr[img_attr_index++] = format;

    img_attr[img_attr_index++] = EGL_WIDTH;
    img_attr[img_attr_index++] = width;

    img_attr[img_attr_index++] = EGL_HEIGHT;
    img_attr[img_attr_index++] = height;

    assert(num_planes <= DRM_NUM_BUF_ATTRS);
    for(int i = 0; i < num_planes; ++i) {
        img_attr[img_attr_index++] = plane_fd_attrs[i];
        img_attr[img_attr_index++] = fds[i];

        img_attr[img_attr_index++] = plane_offset_attrs[i];
        img_attr[img_attr_index++] = offsets[i];

        img_attr[img_attr_index++] = plane_pitch_attrs[i];
        img_attr[img_attr_index++] = pitches[i];

        if(use_modifier) {
            /* 64-bit modifier is split into two 32-bit EGL attributes */
            img_attr[img_attr_index++] = plane_modifier_lo_attrs[i];
            img_attr[img_attr_index++] = modifiers[i] & 0xFFFFFFFFULL;

            img_attr[img_attr_index++] = plane_modifier_hi_attrs[i];
            img_attr[img_attr_index++] = modifiers[i] >> 32ULL;
        }
    }

    img_attr[img_attr_index++] = EGL_NONE;
    assert(img_attr_index <= 44);
}
/* Scales |from| to fit inside |to| while preserving aspect ratio: first scale
   to the target width, and if the resulting height overflows, scale back down
   to the target height. Fractions are truncated (integer result). Returns
   (0,0) for a degenerate source size. */
vec2i scale_keep_aspect_ratio(vec2i from, vec2i to) {
    if(from.x == 0 || from.y == 0)
        return (vec2i){0, 0};

    vec2i result;
    const double height_per_width = (double)from.y / (double)from.x;
    result.x = to.x;
    result.y = result.x * height_per_width;

    if(result.y > to.y) {
        const double width_per_height = (double)result.x / (double)result.y;
        result.y = to.y;
        result.x = result.y * width_per_height;
    }

    return result;
}
/* Computes where to place a region of |output_size| inside the capture video
   frame, honoring the horizontal/vertical alignment in the metadata and then
   offsetting by the configured position. ALIGN_START (or any other value)
   contributes no alignment offset. */
vec2i gsr_capture_get_target_position(vec2i output_size, gsr_capture_metadata *capture_metadata) {
    vec2i target_pos = {
        .x = capture_metadata->position.x,
        .y = capture_metadata->position.y
    };

    if(capture_metadata->halign == GSR_CAPTURE_ALIGN_CENTER)
        target_pos.x += capture_metadata->video_size.x/2 - output_size.x/2;
    else if(capture_metadata->halign == GSR_CAPTURE_ALIGN_END)
        target_pos.x += capture_metadata->video_size.x - output_size.x;

    if(capture_metadata->valign == GSR_CAPTURE_ALIGN_CENTER)
        target_pos.y += capture_metadata->video_size.y/2 - output_size.y/2;
    else if(capture_metadata->valign == GSR_CAPTURE_ALIGN_END)
        target_pos.y += capture_metadata->video_size.y - output_size.y;

    return target_pos;
}
/* Creates an immutable 2D texture of the given size/format with the given
   min/mag filter. Returns the GL texture id (0 is never returned by
   glGenTextures on success; error handling is left to the GL error state). */
unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format, int filter) {
    unsigned int texture_id = 0;
    egl->glGenTextures(1, &texture_id);
    egl->glBindTexture(GL_TEXTURE_2D, texture_id);
    //egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
    // Needed for hevc_10bit for nvenc (cuGraphicsGLRegisterImage)
    egl->glTexStorage2D(GL_TEXTURE_2D, 1, internal_format, width, height);

    egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter);
    egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter);

    egl->glBindTexture(GL_TEXTURE_2D, 0);
    return texture_id;
}
/* TODO: Test with optimus and open kernel modules */
/* Parses the NVIDIA driver major/minor version out of
   /proc/driver/nvidia/version ("... Kernel Module  <major>.<minor> ...").
   On any failure both outputs are 0, a warning is printed and false is
   returned. */
bool get_nvidia_driver_version(int *major, int *minor) {
    *major = 0;
    *minor = 0;

    FILE *f = fopen("/proc/driver/nvidia/version", "rb");
    if(!f) {
        fprintf(stderr, "gsr warning: failed to get nvidia driver version (failed to read /proc/driver/nvidia/version)\n");
        return false;
    }

    char buffer[2048];
    const size_t bytes_read = fread(buffer, 1, sizeof(buffer) - 1, f);
    fclose(f);
    buffer[bytes_read] = '\0';

    bool success = false;
    const char *kernel_module = strstr(buffer, "Kernel Module");
    if(kernel_module) {
        int parsed_major = 0, parsed_minor = 0;
        /* skip strlen("Kernel Module") == 13 chars, then read "<major>.<minor>" */
        if(sscanf(kernel_module + 13, "%d.%d", &parsed_major, &parsed_minor) == 2) {
            *major = parsed_major;
            *minor = parsed_minor;
            success = true;
        }
    }

    if(!success)
        fprintf(stderr, "gsr warning: failed to get nvidia driver version\n");

    return success;
}

391
src/window/wayland.c Normal file
View File

@ -0,0 +1,391 @@
#include "../../include/window/wayland.h"
#include "../../include/vec2.h"
#include "../../include/defs.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <wayland-client.h>
#include <wayland-egl.h>
#include "xdg-output-unstable-v1-client-protocol.h"
#define GSR_MAX_OUTPUTS 32
typedef struct gsr_window_wayland gsr_window_wayland;

/* One advertised wl_output and the metadata collected from its events. */
typedef struct {
    uint32_t wl_name;                 /* registry global name (for removal) */
    struct wl_output *output;
    struct zxdg_output_v1 *xdg_output; /* set lazily; provides logical position */
    vec2i pos;                        /* from wl_output geometry, overridden by xdg_output logical position */
    vec2i size;                       /* from the wl_output mode event */
    int32_t transform;                /* wl_output_transform value */
    char *name;                       /* owned strdup of the output name; may be NULL */
} gsr_wayland_output;

/* Private state behind the Wayland gsr_window backend. */
struct gsr_window_wayland {
    struct wl_display *display;
    struct wl_egl_window *window;     /* small dummy EGL window for context creation */
    struct wl_registry *registry;
    struct wl_surface *surface;
    struct wl_compositor *compositor;
    gsr_wayland_output outputs[GSR_MAX_OUTPUTS];
    int num_outputs;
    struct zxdg_output_manager_v1 *xdg_output_manager;
};
static void output_handle_geometry(void *data, struct wl_output *wl_output,
int32_t x, int32_t y, int32_t phys_width, int32_t phys_height,
int32_t subpixel, const char *make, const char *model,
int32_t transform) {
(void)wl_output;
(void)phys_width;
(void)phys_height;
(void)subpixel;
(void)make;
(void)model;
gsr_wayland_output *gsr_output = data;
gsr_output->pos.x = x;
gsr_output->pos.y = y;
gsr_output->transform = transform;
}
static void output_handle_mode(void *data, struct wl_output *wl_output, uint32_t flags, int32_t width, int32_t height, int32_t refresh) {
(void)wl_output;
(void)flags;
(void)refresh;
gsr_wayland_output *gsr_output = data;
gsr_output->size.x = width;
gsr_output->size.y = height;
}
/* wl_output done event: nothing to finalize — state is consumed directly. */
static void output_handle_done(void *data, struct wl_output *wl_output) {
    (void)data; (void)wl_output;
}
/* wl_output scale event: output scale is irrelevant for capture — ignored. */
static void output_handle_scale(void* data, struct wl_output *wl_output, int32_t factor) {
    (void)data; (void)wl_output; (void)factor;
}
/* wl_output name event: store an owned copy of the output name, replacing any
   previous one. Cleanup: free(NULL) is a no-op, so the old `if(name) free`
   guard was redundant and has been removed. strdup failure leaves name NULL,
   which the enumeration code already tolerates. */
static void output_handle_name(void *data, struct wl_output *wl_output, const char *name) {
    (void)wl_output;
    gsr_wayland_output *gsr_output = data;
    free(gsr_output->name);
    gsr_output->name = strdup(name);
}
/* wl_output description event: human-readable description is unused — ignored. */
static void output_handle_description(void *data, struct wl_output *wl_output, const char *description) {
    (void)data; (void)wl_output; (void)description;
}
/* Listener wiring for every bound wl_output (data = its gsr_wayland_output). */
static const struct wl_output_listener output_listener = {
    .geometry = output_handle_geometry,
    .mode = output_handle_mode,
    .done = output_handle_done,
    .scale = output_handle_scale,
    .name = output_handle_name,
    .description = output_handle_description,
};
/* wl_registry global handler: binds the compositor (v1), every wl_output
   (v4 — needed for the name event) and the xdg output manager (v1).
   Consistency fix: the compositor branch compared against the string literal
   "wl_compositor" while the other branches use the interface's .name — all
   three now use the interface name (same string, single source of truth). */
static void registry_add_object(void *data, struct wl_registry *registry, uint32_t name, const char *interface, uint32_t version) {
    (void)version;
    gsr_window_wayland *window_wayland = data;
    if(strcmp(interface, wl_compositor_interface.name) == 0) {
        if(window_wayland->compositor)
            return;

        window_wayland->compositor = wl_registry_bind(registry, name, &wl_compositor_interface, 1);
    } else if(strcmp(interface, wl_output_interface.name) == 0) {
        /* version 4 added the name/description events we rely on */
        if(version < 4) {
            fprintf(stderr, "gsr warning: wl output interface version is < 4, expected >= 4 to capture a monitor\n");
            return;
        }

        if(window_wayland->num_outputs == GSR_MAX_OUTPUTS) {
            fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %u\n", GSR_MAX_OUTPUTS, name);
            return;
        }

        gsr_wayland_output *gsr_output = &window_wayland->outputs[window_wayland->num_outputs];
        window_wayland->num_outputs++;
        *gsr_output = (gsr_wayland_output) {
            .wl_name = name,
            .output = wl_registry_bind(registry, name, &wl_output_interface, 4),
            .pos = { .x = 0, .y = 0 },
            .size = { .x = 0, .y = 0 },
            .transform = 0,
            .name = NULL,
        };
        wl_output_add_listener(gsr_output->output, &output_listener, gsr_output);
    } else if(strcmp(interface, zxdg_output_manager_v1_interface.name) == 0) {
        if(version < 1) {
            fprintf(stderr, "gsr warning: xdg output interface version is < 1, expected >= 1 to capture a monitor\n");
            return;
        }

        if(window_wayland->xdg_output_manager)
            return;

        window_wayland->xdg_output_manager = wl_registry_bind(registry, name, &zxdg_output_manager_v1_interface, 1);
    }
}
/* wl_registry global_remove handler.
   TODO: Remove output */
static void registry_remove_object(void *data, struct wl_registry *registry, uint32_t name) {
    (void)data; (void)registry; (void)name;
}
static struct wl_registry_listener registry_listener = {
.global = registry_add_object,
.global_remove = registry_remove_object,
};
static void xdg_output_logical_position(void *data, struct zxdg_output_v1 *zxdg_output_v1, int32_t x, int32_t y) {
(void)zxdg_output_v1;
gsr_wayland_output *gsr_xdg_output = data;
gsr_xdg_output->pos.x = x;
gsr_xdg_output->pos.y = y;
}
/* zxdg_output logical_size event: unused — the mode size is used instead. */
static void xdg_output_handle_logical_size(void *data, struct zxdg_output_v1 *xdg_output, int32_t width, int32_t height) {
    (void)data; (void)xdg_output; (void)width; (void)height;
}
/* zxdg_output done event: nothing to finalize. */
static void xdg_output_handle_done(void *data, struct zxdg_output_v1 *xdg_output) {
    (void)data; (void)xdg_output;
}
/* zxdg_output name event: ignored — the wl_output v4 name event is used. */
static void xdg_output_handle_name(void *data, struct zxdg_output_v1 *xdg_output, const char *name) {
    (void)data; (void)xdg_output; (void)name;
}
/* zxdg_output description event: unused — ignored. */
static void xdg_output_handle_description(void *data, struct zxdg_output_v1 *xdg_output, const char *description) {
    (void)data; (void)xdg_output; (void)description;
}
/* Listener wiring for each output's zxdg_output (data = its gsr_wayland_output). */
static const struct zxdg_output_v1_listener xdg_output_listener = {
    .logical_position = xdg_output_logical_position,
    .logical_size = xdg_output_handle_logical_size,
    .done = xdg_output_handle_done,
    .name = xdg_output_handle_name,
    .description = xdg_output_handle_description,
};
/* Attaches a zxdg_output to every known wl_output so the listener can replace
   each output's position with the compositor's logical position, then blocks
   until those events have been delivered. */
static void gsr_window_wayland_set_monitor_outputs_from_xdg_output(gsr_window_wayland *self) {
    if(!self->xdg_output_manager) {
        fprintf(stderr, "gsr warning: zxdg_output_manager not found. registered monitor positions might be incorrect\n");
        return;
    }

    for(int i = 0; i < self->num_outputs; ++i) {
        gsr_wayland_output *output = &self->outputs[i];
        output->xdg_output = zxdg_output_manager_v1_get_xdg_output(self->xdg_output_manager, output->output);
        zxdg_output_v1_add_listener(output->xdg_output, &xdg_output_listener, output);
    }

    // Fetch xdg_output
    wl_display_roundtrip(self->display);
}
/* Tears down all Wayland objects owned by the window, in reverse order of
   dependency, and NULLs every pointer so deinit is safe to call on a partially
   initialized struct (the init failure path relies on this).
   Bug fix: the xdg_output branch previously reset `.output` instead of
   `.xdg_output`, leaving a dangling xdg_output pointer (double-destroy risk on
   a repeated deinit) and skipping the wl_output NULL-out it clobbered. */
static void gsr_window_wayland_deinit(gsr_window_wayland *self) {
    if(self->window) {
        wl_egl_window_destroy(self->window);
        self->window = NULL;
    }

    if(self->surface) {
        wl_surface_destroy(self->surface);
        self->surface = NULL;
    }

    for(int i = 0; i < self->num_outputs; ++i) {
        if(self->outputs[i].output) {
            wl_output_destroy(self->outputs[i].output);
            self->outputs[i].output = NULL;
        }

        if(self->outputs[i].name) {
            free(self->outputs[i].name);
            self->outputs[i].name = NULL;
        }

        if(self->outputs[i].xdg_output) {
            zxdg_output_v1_destroy(self->outputs[i].xdg_output);
            self->outputs[i].xdg_output = NULL;
        }
    }
    self->num_outputs = 0;

    if(self->xdg_output_manager) {
        zxdg_output_manager_v1_destroy(self->xdg_output_manager);
        self->xdg_output_manager = NULL;
    }

    if(self->compositor) {
        wl_compositor_destroy(self->compositor);
        self->compositor = NULL;
    }

    if(self->registry) {
        wl_registry_destroy(self->registry);
        self->registry = NULL;
    }

    if(self->display) {
        wl_display_disconnect(self->display);
        self->display = NULL;
    }
}
/* Connects to the Wayland display, binds globals (compositor, outputs, xdg
   output manager) via the registry, resolves logical monitor positions, and
   creates a tiny 16x16 EGL window used only for GL context creation.
   The two roundtrips are intentional and ordered: the first delivers registry
   globals (which bind wl_outputs), the second delivers those outputs' events.
   On failure everything allocated so far is torn down and false is returned. */
static bool gsr_window_wayland_init(gsr_window_wayland *self) {
    self->display = wl_display_connect(NULL);
    if(!self->display) {
        fprintf(stderr, "gsr error: gsr_window_wayland_init failed: failed to connect to the Wayland server\n");
        goto fail;
    }

    self->registry = wl_display_get_registry(self->display); // TODO: Error checking
    wl_registry_add_listener(self->registry, &registry_listener, self); // TODO: Error checking

    // Fetch globals
    wl_display_roundtrip(self->display);

    // Fetch wl_output
    wl_display_roundtrip(self->display);

    gsr_window_wayland_set_monitor_outputs_from_xdg_output(self);

    if(!self->compositor) {
        fprintf(stderr, "gsr error: gsr_window_wayland_init failed: failed to find compositor\n");
        goto fail;
    }

    self->surface = wl_compositor_create_surface(self->compositor);
    if(!self->surface) {
        fprintf(stderr, "gsr error: gsr_window_wayland_init failed: failed to create surface\n");
        goto fail;
    }

    self->window = wl_egl_window_create(self->surface, 16, 16);
    if(!self->window) {
        fprintf(stderr, "gsr error: gsr_window_wayland_init failed: failed to create window\n");
        goto fail;
    }

    return true;

    fail:
    gsr_window_wayland_deinit(self);
    return false;
}
/* vtable destroy: tear down the Wayland state, then free both allocations. */
static void gsr_window_wayland_destroy(gsr_window *window) {
    gsr_window_wayland_deinit(window->priv);
    free(window->priv);
    free(window);
}
/* vtable process_event: dispatch queued Wayland events without blocking and
   flush outgoing requests. Returns true if any event was dispatched. */
static bool gsr_window_wayland_process_event(gsr_window *window) {
    gsr_window_wayland *self = window->priv;
    // TODO: pselect on wl_display_get_fd before doing dispatch
    const int num_dispatched = wl_display_dispatch_pending(self->display);
    wl_display_flush(self->display);
    return num_dispatched > 0;
}
/* vtable get_display_server: this backend is always Wayland. */
static gsr_display_server gsr_wayland_get_display_server(void) {
    return GSR_DISPLAY_SERVER_WAYLAND;
}
/* vtable get_display: returns the native wl_display handle. */
static void* gsr_window_wayland_get_display(gsr_window *window) {
    return ((gsr_window_wayland*)window->priv)->display;
}
/* vtable get_window: returns the native wl_egl_window handle. */
static void* gsr_window_wayland_get_window(gsr_window *window) {
    return ((gsr_window_wayland*)window->priv)->window;
}
/* Maps a wl_output_transform value (0..3 = normal/90/180/270) to gsr rotation.
   Flipped (4..7) and unknown transforms fall back to no rotation. */
static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
    if(rot == 1)
        return GSR_MONITOR_ROT_90;
    if(rot == 2)
        return GSR_MONITOR_ROT_180;
    if(rot == 3)
        return GSR_MONITOR_ROT_270;
    return GSR_MONITOR_ROT_0;
}
/* vtable monitor enumeration: walks the outputs cached at init time and emits
   a gsr_monitor for every named output. Wayland exposes no DRM connector id,
   so connector_id is 0 and the identifier is derived from the name. */
static void gsr_window_wayland_for_each_active_monitor_output_cached(const gsr_window *window, active_monitor_callback callback, void *userdata) {
    const gsr_window_wayland *self = window->priv;
    for(int output_index = 0; output_index < self->num_outputs; ++output_index) {
        const gsr_wayland_output *output = &self->outputs[output_index];
        if(!output->name)
            continue;

        const int type_index = get_connector_type_by_name(output->name);
        const int type_id = get_connector_type_id_by_name(output->name);
        const bool have_identifier = type_index != -1 && type_id != -1;

        const gsr_monitor monitor = {
            .name = output->name,
            .name_len = strlen(output->name),
            .pos = { .x = output->pos.x, .y = output->pos.y },
            .size = { .x = output->size.x, .y = output->size.y },
            .connector_id = 0,
            .rotation = wayland_transform_to_gsr_rotation(output->transform),
            .monitor_identifier = have_identifier ? monitor_identifier_from_type_and_count(type_index, type_id) : 0
        };
        callback(&monitor, userdata);
    }
}
gsr_window* gsr_window_wayland_create(void) {
gsr_window *window = calloc(1, sizeof(gsr_window));
if(!window)
return window;
gsr_window_wayland *window_wayland = calloc(1, sizeof(gsr_window_wayland));
if(!window_wayland) {
free(window);
return NULL;
}
if(!gsr_window_wayland_init(window_wayland)) {
free(window_wayland);
free(window);
return NULL;
}
*window = (gsr_window) {
.destroy = gsr_window_wayland_destroy,
.process_event = gsr_window_wayland_process_event,
.get_event_data = NULL,
.get_display_server = gsr_wayland_get_display_server,
.get_display = gsr_window_wayland_get_display,
.get_window = gsr_window_wayland_get_window,
.for_each_active_monitor_output_cached = gsr_window_wayland_for_each_active_monitor_output_cached,
.priv = window_wayland
};
return window;
}

30
src/window/window.c Normal file
View File

@ -0,0 +1,30 @@
#include "../../include/window/window.h"
#include <stddef.h>
void gsr_window_destroy(gsr_window *self);
/* Polls/dispatches one round of backend events; returns true if any were handled. */
bool gsr_window_process_event(gsr_window *self) {
    return self->process_event(self);
}
/* Returns the last X11 event, or NULL for backends without events (Wayland). */
XEvent* gsr_window_get_event_data(gsr_window *self) {
    return self->get_event_data ? self->get_event_data(self) : NULL;
}
/* Reports which display server (X11/Wayland) this window backend talks to. */
gsr_display_server gsr_window_get_display_server(const gsr_window *self) {
    return self->get_display_server();
}
/* Returns the native display handle (Display* on X11, wl_display* on Wayland). */
void* gsr_window_get_display(gsr_window *self) {
    return self->get_display(self);
}
/* Returns the native window handle (Window on X11, wl_egl_window* on Wayland). */
void* gsr_window_get_window(gsr_window *self) {
    return self->get_window(self);
}
/* Invokes |callback| for each monitor known to the backend (cached at init). */
void gsr_window_for_each_active_monitor_output_cached(const gsr_window *self, active_monitor_callback callback, void *userdata) {
    self->for_each_active_monitor_output_cached(self, callback, userdata);
}

162
src/window/x11.c Normal file
View File

@ -0,0 +1,162 @@
#include "../../include/window/x11.h"
#include "../../include/vec2.h"
#include "../../include/defs.h"
#include "../../include/utils.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <X11/Xlib.h>
#define GSR_MAX_OUTPUTS 32
/* Snapshot of one X11 monitor, copied out of the RandR enumeration at init. */
typedef struct {
    char *name;                  /* owned strdup of the output name */
    vec2i pos;
    vec2i size;
    uint32_t connector_id;       /* DRM connector id from the CONNECTOR_ID property, or 0 */
    gsr_monitor_rotation rotation;
    uint32_t monitor_identifier; /* crtc id */
} gsr_x11_output;

/* Private state behind the X11 gsr_window backend. */
typedef struct {
    Display *display;            /* not owned; provided by the caller of create */
    Window window;               /* small dummy window used for GL context creation */
    gsr_x11_output outputs[GSR_MAX_OUTPUTS];
    int num_outputs;
    XEvent xev;                  /* storage for the most recently read event */
} gsr_window_x11;
static void store_x11_monitor(const gsr_monitor *monitor, void *userdata) {
gsr_window_x11 *window_x11 = userdata;
if(window_x11->num_outputs == GSR_MAX_OUTPUTS) {
fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %s\n", GSR_MAX_OUTPUTS, monitor->name);
return;
}
char *monitor_name = strdup(monitor->name);
if(!monitor_name)
return;
const int index = window_x11->num_outputs;
window_x11->outputs[index].name = monitor_name;
window_x11->outputs[index].pos = monitor->pos;
window_x11->outputs[index].size = monitor->size;
window_x11->outputs[index].connector_id = monitor->connector_id;
window_x11->outputs[index].rotation = monitor->rotation;
window_x11->outputs[index].monitor_identifier = monitor->monitor_identifier;
++window_x11->num_outputs;
}
/* Destroys the dummy window and frees all cached monitor names.
   The Display itself is not owned and is left untouched. */
static void gsr_window_x11_deinit(gsr_window_x11 *self) {
    if(self->window) {
        XDestroyWindow(self->display, self->window);
        self->window = None;
    }

    for(int output_index = 0; output_index < self->num_outputs; ++output_index) {
        free(self->outputs[output_index].name);
        self->outputs[output_index].name = NULL;
    }
    self->num_outputs = 0;
}
/* Creates a small 16x16 dummy window (for GL context creation) and caches the
   currently connected monitors. Returns false if the window can't be created. */
static bool gsr_window_x11_init(gsr_window_x11 *self) {
    self->window = XCreateWindow(self->display, DefaultRootWindow(self->display), 0, 0, 16, 16, 0, CopyFromParent, InputOutput, CopyFromParent, 0, NULL);
    if(!self->window) {
        fprintf(stderr, "gsr error: gsr_window_x11_init failed: failed to create gl window\n");
        return false;
    }

    self->num_outputs = 0;
    /* Snapshot monitors once; enumeration later uses this cache */
    for_each_active_monitor_output_x11_not_cached(self->display, store_x11_monitor, self);
    return true;
}
/* vtable destroy: tear down the X11 state, then free both allocations. */
static void gsr_window_x11_destroy(gsr_window *window) {
    gsr_window_x11_deinit(window->priv);
    free(window->priv);
    free(window);
}
/* vtable process_event: non-blocking read of one queued X event into self->xev.
   Returns true when an event was read (retrievable via get_event_data). */
static bool gsr_window_x11_process_event(gsr_window *window) {
    gsr_window_x11 *self = window->priv;
    if(!XPending(self->display))
        return false;
    XNextEvent(self->display, &self->xev);
    return true;
}
/* vtable get_event_data: exposes the most recently read XEvent. */
static XEvent* gsr_window_x11_get_event_data(gsr_window *window) {
    return &((gsr_window_x11*)window->priv)->xev;
}
/* vtable get_display_server: this backend is always X11. */
static gsr_display_server gsr_window_x11_get_display_server(void) {
    return GSR_DISPLAY_SERVER_X11;
}
/* vtable get_display: returns the Xlib Display handle. */
static void* gsr_window_x11_get_display(gsr_window *window) {
    return ((gsr_window_x11*)window->priv)->display;
}
/* vtable get_window: returns the X11 Window id smuggled through a void*. */
static void* gsr_window_x11_get_window(gsr_window *window) {
    return (void*)((gsr_window_x11*)window->priv)->window;
}
/* vtable monitor enumeration: replays the monitor cache built at init. */
static void gsr_window_x11_for_each_active_monitor_output_cached(const gsr_window *window, active_monitor_callback callback, void *userdata) {
    const gsr_window_x11 *self = window->priv;
    for(int output_index = 0; output_index < self->num_outputs; ++output_index) {
        const gsr_x11_output *output = &self->outputs[output_index];
        const gsr_monitor monitor = {
            .name = output->name,
            .name_len = strlen(output->name),
            .pos = output->pos,
            .size = output->size,
            .connector_id = output->connector_id,
            .rotation = output->rotation,
            .monitor_identifier = output->monitor_identifier
        };
        callback(&monitor, userdata);
    }
}
/* Allocates and initializes the X11 gsr_window backend for an already-open
   Display (not owned). Returns NULL on allocation or init failure. */
gsr_window* gsr_window_x11_create(Display *display) {
    gsr_window *window = calloc(1, sizeof(gsr_window));
    if(!window)
        return NULL;

    gsr_window_x11 *impl = calloc(1, sizeof(gsr_window_x11));
    if(!impl) {
        free(window);
        return NULL;
    }

    impl->display = display;
    if(!gsr_window_x11_init(impl)) {
        free(impl);
        free(window);
        return NULL;
    }

    *window = (gsr_window) {
        .destroy = gsr_window_x11_destroy,
        .process_event = gsr_window_x11_process_event,
        .get_event_data = gsr_window_x11_get_event_data,
        .get_display_server = gsr_window_x11_get_display_server,
        .get_display = gsr_window_x11_get_display,
        .get_window = gsr_window_x11_get_window,
        .for_each_active_monitor_output_cached = gsr_window_x11_for_each_active_monitor_output_cached,
        .priv = impl
    };
    return window;
}

134
src/window_texture.c Normal file
View File

@ -0,0 +1,134 @@
#include "../include/window_texture.h"
#include <X11/extensions/Xcomposite.h>
/* Returns nonzero when the XComposite extension is available at version >= 0.2,
 * the minimum required for XCompositeNameWindowPixmap. */
static int x11_supports_composite_named_window_pixmap(Display *display) {
    int ext_major, ext_minor;
    if(!XCompositeQueryExtension(display, &ext_major, &ext_minor))
        return 0;

    int version_major, version_minor;
    if(!XCompositeQueryVersion(display, &version_major, &version_minor))
        return 0;

    return version_major > 0 || version_minor >= 2;
}
/* Initializes |window_texture| for capturing |window| via XComposite.
 * Returns 0 on success; 1 when the XComposite named-window-pixmap feature is
 * unavailable; otherwise forwards the error code from window_texture_on_resize.
 * All fields are reset first, so window_texture_deinit is safe after failure. */
int window_texture_init(WindowTexture *window_texture, Display *display, Window window, gsr_egl *egl) {
    window_texture->display = display;
    window_texture->window = window;
    window_texture->egl = egl;
    window_texture->pixmap = None;
    window_texture->image = NULL;
    window_texture->texture_id = 0;
    window_texture->redirected = 0;
    window_texture->window_width = 0;
    window_texture->window_height = 0;

    if(x11_supports_composite_named_window_pixmap(display)) {
        XCompositeRedirectWindow(display, window, CompositeRedirectAutomatic);
        window_texture->redirected = 1;
        return window_texture_on_resize(window_texture);
    }
    return 1;
}
/* Releases the EGL image and X pixmap backing |self|; the GL texture is only
 * deleted when |delete_texture| is nonzero (on_resize keeps it for reuse). */
static void window_texture_cleanup(WindowTexture *self, int delete_texture) {
    if(delete_texture != 0 && self->texture_id != 0) {
        self->egl->glDeleteTextures(1, &self->texture_id);
        self->texture_id = 0;
    }

    if(self->image != NULL) {
        self->egl->eglDestroyImage(self->egl->egl_display, self->image);
        self->image = NULL;
    }

    if(self->pixmap != None) {
        XFreePixmap(self->display, self->pixmap);
        self->pixmap = None;
    }
}
/* Undoes the composite redirect (if any) and frees every resource,
 * including the GL texture. */
void window_texture_deinit(WindowTexture *self) {
    if(self->redirected != 0) {
        XCompositeUnredirectWindow(self->display, self->window, CompositeRedirectAutomatic);
        self->redirected = 0;
    }
    window_texture_cleanup(self, 1 /* delete_texture */);
}
/* Re-creates the backing pixmap and EGL image for the window's current size
 * and binds the image to the GL texture (reusing an existing texture id when
 * one is already allocated).
 * Returns 0 on success, otherwise:
 *   2 = XCompositeNameWindowPixmap failed
 *   3 = texture allocation failed
 *   4 = eglCreateImage failed
 *   5 = importing the image into the texture failed
 * On failure, a texture created by this call is deleted but a reused texture
 * is kept alive, so self->texture_id never references a destroyed texture. */
int window_texture_on_resize(WindowTexture *self) {
    window_texture_cleanup(self, 0);

    int result = 0;
    Pixmap pixmap = None;
    unsigned int texture_id = 0;
    EGLImage image = NULL;
    /* Remember whether this call allocates the texture: only then may the
       error path delete it. Previously a *reused* texture was also deleted on
       failure, leaving self->texture_id pointing at a destroyed GL texture. */
    const int created_texture = self->texture_id == 0;

    const intptr_t pixmap_attrs[] = {
        EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
        EGL_NONE,
    };

    Window root_window;
    int window_x, window_y;
    unsigned int window_border, window_depth;
    XGetGeometry(self->display, self->window, &root_window, &window_x, &window_y, &self->window_width, &self->window_height, &window_border, &window_depth);

    pixmap = XCompositeNameWindowPixmap(self->display, self->window);
    if(!pixmap) {
        result = 2;
        goto cleanup;
    }

    if(created_texture) {
        self->egl->glGenTextures(1, &texture_id);
        if(texture_id == 0) {
            result = 3;
            goto cleanup;
        }
        self->egl->glBindTexture(GL_TEXTURE_2D, texture_id);
    } else {
        texture_id = self->texture_id;
        self->egl->glBindTexture(GL_TEXTURE_2D, texture_id);
    }

    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

    /* Drain any stale GL/EGL errors so the checks below only see errors
       produced by this import. */
    while(self->egl->glGetError()) {}
    while(self->egl->eglGetError() != EGL_SUCCESS) {}

    image = self->egl->eglCreateImage(self->egl->egl_display, NULL, EGL_NATIVE_PIXMAP_KHR, (EGLClientBuffer)pixmap, pixmap_attrs);
    if(!image) {
        result = 4;
        goto cleanup;
    }

    self->egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
    if(self->egl->glGetError() != 0 || self->egl->eglGetError() != EGL_SUCCESS) {
        result = 5;
        goto cleanup;
    }

    self->pixmap = pixmap;
    self->texture_id = texture_id;
    self->image = image;

    cleanup:
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);

    if(result != 0) {
        if(image)
            self->egl->eglDestroyImage(self->egl->egl_display, image);
        if(created_texture && texture_id != 0)
            self->egl->glDeleteTextures(1, &texture_id);
        if(pixmap)
            XFreePixmap(self->display, pixmap);
    }

    return result;
}
/* Accessor for the GL texture mirroring the captured window (0 if none yet). */
unsigned int window_texture_get_opengl_texture_id(WindowTexture *self) {
    const unsigned int id = self->texture_id;
    return id;
}

46
src/xnvctrl.c Normal file
View File

@ -0,0 +1,46 @@
#include "../include/xnvctrl.h"
#include "../include/library_loader.h"
#include <string.h>
#include <stdio.h>
#include <dlfcn.h>
/* Loads libXNVCtrl.so.0 at runtime and resolves the NV-CONTROL entry points
 * used by gsr. On any failure |self| is left fully zeroed and false is
 * returned; on success |self->library| owns the dlopen handle. */
bool gsr_xnvctrl_load(gsr_xnvctrl *self, Display *display) {
    memset(self, 0, sizeof(gsr_xnvctrl));
    self->display = display;

    dlerror(); /* clear */
    void *handle = dlopen("libXNVCtrl.so.0", RTLD_LAZY);
    if(!handle) {
        fprintf(stderr, "gsr error: gsr_xnvctrl_load failed: failed to load libXNVCtrl.so.0, error: %s\n", dlerror());
        return false;
    }

    /* NULL-terminated table consumed by dlsym_load_list. */
    const dlsym_assign required_dlsym[] = {
        { (void**)&self->XNVCTRLQueryExtension, "XNVCTRLQueryExtension" },
        { (void**)&self->XNVCTRLSetTargetAttributeAndGetStatus, "XNVCTRLSetTargetAttributeAndGetStatus" },
        { (void**)&self->XNVCTRLQueryValidTargetAttributeValues, "XNVCTRLQueryValidTargetAttributeValues" },
        { (void**)&self->XNVCTRLQueryTargetStringAttribute, "XNVCTRLQueryTargetStringAttribute" },
        { NULL, NULL }
    };

    if(dlsym_load_list(handle, required_dlsym)) {
        self->library = handle;
        return true;
    }

    fprintf(stderr, "gsr error: gsr_xnvctrl_load failed: missing required symbols in libXNVCtrl.so.0\n");
    dlclose(handle);
    memset(self, 0, sizeof(gsr_xnvctrl));
    return false;
}
/* Closes the library handle (if loaded) and zeroes |self|; safe to call
 * on an already-unloaded instance. */
void gsr_xnvctrl_unload(gsr_xnvctrl *self) {
    if(!self->library)
        return;
    dlclose(self->library);
    memset(self, 0, sizeof(gsr_xnvctrl));
}