How to Edit Picture Pixels in the vout thread?
Posted: 29 Aug 2024 14:26
Hi,
On the server side, I have a video from which I have extracted an object. For each pixel of the object, I have recorded its (x,y) coordinates and its (Y,U,V) color values, and I have stored the object in a JSON file as a mapping (x,y) -> (Y,U,V).
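To give an idea of the layout, the JSON roughly looks like this (the field names here are only illustrative; parse_json_file in the code below is my own helper that parses it into a JsonMetaData structure):

    {
      "width": 1920,
      "height": 1080,
      "shapes": [
        {
          "points": [
            { "x": 412, "y": 230, "Y": 81, "U": 90, "V": 240 },
            { "x": 413, "y": 230, "Y": 83, "U": 91, "V": 238 }
          ]
        }
      ]
    }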
Now I want to paste the same object onto every frame on the client side, where the video is played in VLC.
I put my code in video_output.c, in the function static int ThreadDisplayPreparePicture(vout_thread_t *vout, bool reuse, bool frame_by_frame) (the code is below).
- In ThreadDisplayPreparePicture, I read the JSON file and store it in a data structure (sketched just after this list).
- For each frame, I call the function stitch_yuv to overlay the object on the frame.
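Roughly, the data structure that parse_json_file fills in looks like this (only a sketch; the exact field types may differ, but the field names match what the code below accesses):

    #include <stdint.h>  /* uint8_t */

    typedef struct {
        struct { int _x, _y; } pos;          /* pixel position in the source video */
        struct { uint8_t _y, _u, _v; } yuv;  /* color of that pixel                */
    } VideoPoint;

    typedef struct {
        int num_points;
        VideoPoint *points;
    } Shape;

    typedef struct {
        int width, height;   /* resolution the (x,y) coordinates refer to */
        int num_shapes;
        Shape *shapes;
    } JsonMetaData;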
However, the problem is that the object appears at the correct position, but an exact mirror image of it also appears, reflected across the x-axis.
Why does this mirror image appear on the frame? I am only applying the (Y,U,V) values at the (x,y) coordinates from the JSON file.
Am I doing this the right way? Are there any built-in APIs for this? Thanks in advance for your help.
// I have added this function and call it from ThreadDisplayPreparePicture.
inline void stitch_yuv(picture_t *picture, VideoPoint *points, int num_points, float width_ratio, float height_ratio, bool interpolation)
{
    int i_pitch[3];
    uint8_t *p[3];

    p[0] = picture->p[0].p_pixels;
    p[1] = picture->p[1].p_pixels;
    p[2] = picture->p[2].p_pixels;
    i_pitch[0] = picture->p[0].i_pitch;
    i_pitch[1] = picture->p[1].i_pitch;
    i_pitch[2] = picture->p[2].i_pitch;

    for (int point_count = 0; point_count < num_points; point_count++)
    {
        VideoPoint *point = &points[point_count];
        float i_x = point->pos._x * width_ratio;
        float i_y = point->pos._y * height_ratio;

        p[0][(int)(i_y * i_pitch[0] + i_x)] = point->yuv._y;
        p[1][(int)(i_y / 2 * i_pitch[1] + i_x / 2)] = point->yuv._u;
        p[2][(int)(i_y / 2 * i_pitch[2] + i_x / 2)] = point->yuv._v;
    }
}
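For reference (in case my indexing is part of the problem), this is how I understand the I420 layout that stitch_yuv writes into: each plane_t has an allocated stride i_pitch in bytes that can be larger than the visible width, and the U/V planes are subsampled 2x2. Below is a minimal sketch of writing a single point with explicit integer casts and a bounds check; put_yuv_point is a hypothetical helper using the plane_t fields from vlc_picture.h, not code I am running:

    /* Write one (Y,U,V) point into an I420 picture at luma position (x, y). */
    static inline void put_yuv_point(picture_t *pic, int x, int y,
                                     uint8_t Y, uint8_t U, uint8_t V)
    {
        plane_t *py = &pic->p[0];  /* luma, full resolution         */
        plane_t *pu = &pic->p[1];  /* chroma U, half width & height */
        plane_t *pv = &pic->p[2];  /* chroma V, half width & height */

        /* Clamp to the visible luma area; the allocated stride (i_pitch)
         * may be larger than the visible width (i_visible_pitch). */
        if (x < 0 || y < 0 || x >= py->i_visible_pitch || y >= py->i_visible_lines)
            return;

        py->p_pixels[y * py->i_pitch + x]             = Y;
        pu->p_pixels[(y / 2) * pu->i_pitch + (x / 2)] = U;
        pv->p_pixels[(y / 2) * pv->i_pitch + (x / 2)] = V;
    }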
/*
 * This is the same function as static int PreparePicture() in video_output.c,
 * with my additions.
 */
static int JSON_NUM = 1;

static int ThreadDisplayPreparePicture(vout_thread_t *vout, bool reuse, bool frame_by_frame)
{
    bool is_late_dropped = vout->p->is_late_dropped && !vout->p->pause.is_on && !frame_by_frame;

    vlc_mutex_lock(&vout->p->filter.lock);

    picture_t *picture = filter_chain_VideoFilter(vout->p->filter.chain_static, NULL);
    assert(!reuse || !picture);

    while (!picture)
    {
        picture_t *decoded;
        if (reuse && vout->p->displayed.decoded)
        {
            decoded = picture_Hold(vout->p->displayed.decoded);
            msg_Info(vout, "ThreadDisplayPreparePicture: reuse picture");
        }
        else
        {
            decoded = picture_fifo_Pop(vout->p->decoder_fifo);
            if (decoded)
            {
                if (is_late_dropped && !decoded->b_force)
                {
                    vlc_tick_t late_threshold;
                    if (decoded->format.i_frame_rate && decoded->format.i_frame_rate_base)
                    {
                        late_threshold = ((CLOCK_FREQ / 2) * decoded->format.i_frame_rate_base) / decoded->format.i_frame_rate;
                    }
                    else
                    {
                        late_threshold = VOUT_DISPLAY_LATE_THRESHOLD;
                    }

                    const vlc_tick_t predicted = mdate() + 0; /* TODO improve */
                    const vlc_tick_t late = predicted - decoded->date;
                    if (late > late_threshold)
                    {
                        msg_Warn(vout, "picture is too late to be displayed (missing %" PRId64 " ms)", late / 1000);
                        picture_Release(decoded);
                        vout_statistic_AddLost(&vout->p->statistic, 1);
                        continue;
                    }
                    else if (late > 0)
                    {
                        msg_Dbg(vout, "picture might be displayed late (missing %" PRId64 " ms)", late / 1000);
                    }
                }
                if (!VideoFormatIsCropArEqual(&decoded->format, &vout->p->filter.format))
                    ThreadChangeFilters(vout, &decoded->format, vout->p->filter.configuration, -1, true);
            }
        }

        if (!decoded)
            break;
        reuse = false;

        char json_path[256];
        sprintf(json_path, "%s%d.json", _JSON_FILE_PATH_, JSON_NUM);
        if (access(json_path, F_OK) == -1)
        {
            msg_Info(vout, "ThreadDisplayPreparePicture: json file %s not exist, back to 1", json_path);
            JSON_NUM = 1;
            sprintf(json_path, "%s%d.json", _JSON_FILE_PATH_, JSON_NUM);
        }

        JsonMetaData *jsonMetaData = parse_json_file(json_path);
        if (jsonMetaData != NULL)
        {
            int num_shapes = jsonMetaData->num_shapes;
            float width_ratio = (float)decoded->format.i_width / jsonMetaData->width;
            float height_ratio = (float)decoded->format.i_height / jsonMetaData->height;

            if (decoded->format.i_chroma == VLC_CODEC_I420)
            {
                for (int shape_count = 0; shape_count < num_shapes; shape_count++)
                {
                    Shape *shape = &jsonMetaData->shapes[shape_count];
                    int num_points = shape->num_points;
                    VideoPoint *points = shape->points;
                    stitch_yuv(decoded, points, num_points, width_ratio, height_ratio, false);
                }
            }
        }

        if (vout->p->displayed.decoded)
            picture_Release(vout->p->displayed.decoded);
        vout->p->displayed.decoded = picture_Hold(decoded);
        vout->p->displayed.timestamp = decoded->date;
        vout->p->displayed.is_interlaced = !decoded->b_progressive;

        picture = filter_chain_VideoFilter(vout->p->filter.chain_static, decoded);
    }
    vlc_mutex_unlock(&vout->p->filter.lock);

    if (!picture)
        return VLC_EGENERIC;

    assert(!vout->p->displayed.next);
    if (!vout->p->displayed.current)
        vout->p->displayed.current = picture;
    else
        vout->p->displayed.next = picture;

    // msg_Info(vout,"test8");
    JSON_NUM++;
    return VLC_SUCCESS;
}